| code (string, length 86 to 54.5k) | code_codestyle (int64, 0 to 371) | style_context (string, length 87 to 49.2k) | style_context_codestyle (int64, 0 to 349) | label (int64, 0 to 1) |
|---|---|---|---|---|
import inspect
import unittest

from transformers import MobileNetV2Config
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MobileNetV2ForImageClassification, MobileNetV2ForSemanticSegmentation, MobileNetV2Model
    from transformers.models.mobilenet_v2.modeling_mobilenet_v2 import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import MobileNetV2ImageProcessor


class MobileNetV2ConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))


class MobileNetV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        tf_padding=True,
        hidden_act="relu6",
        last_hidden_size=1280,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.tf_padding = tf_padding
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.last_hidden_size = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier)
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileNetV2Config(
            num_channels=self.num_channels,
            image_size=self.image_size,
            depth_multiplier=self.depth_multiplier,
            depth_divisible_by=self.depth_divisible_by,
            min_depth=self.min_depth,
            expand_ratio=self.expand_ratio,
            output_stride=self.output_stride,
            first_layer_is_expansion=self.first_layer_is_expansion,
            finegrained_output=self.finegrained_output,
            hidden_act=self.hidden_act,
            tf_padding=self.tf_padding,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetV2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.last_hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetV2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetV2ForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class MobileNetV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileNetV2Model, MobileNetV2ForImageClassification, MobileNetV2ForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileNetV2Model,
            "image-classification": MobileNetV2ForImageClassification,
            "image-segmentation": MobileNetV2ForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileNetV2ModelTester(self)
        self.config_tester = MobileNetV2ConfigTester(self, config_class=MobileNetV2Config, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileNetV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileNetV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileNetV2 does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 16
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class MobileNetV2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileNetV2ImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224") if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetV2ForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.2445, -1.1993, 0.1905]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileNetV2ForSemanticSegmentation.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")
        model = model.to(torch_device)

        image_processor = MobileNetV2ImageProcessor.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 65, 65))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
                [[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
                [[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
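# A minimal sketch (plain Python, no transformers dependency) of how the expected
# tensor shapes asserted in the tester above are derived from MobileNetV2's output
# stride. The numbers mirror the tester defaults; nothing here calls the real model.
batch_size, image_size, output_stride = 13, 32, 32
last_hidden_size = 1280  # finegrained_output=True keeps the full channel width

feature_map = image_size // output_stride  # 32 // 32 == 1
expected_last_hidden_state = (batch_size, last_hidden_size, feature_map, feature_map)
expected_pooler_output = (batch_size, last_hidden_size)

print(expected_last_hidden_state)  # (13, 1280, 1, 1)
print(expected_pooler_output)      # (13, 1280)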
| code_codestyle: 300 |
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

import torch
from torch import nn

from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging


logger = logging.get_logger(__name__)


class MultiControlNetModel(ModelMixin):
    """Wrapper around multiple `ControlNetModel` instances for Multi-ControlNet."""

    def __init__(self, controlnets: Union[List[ControlNetModel], Tuple[ControlNetModel]]):
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        encoder_hidden_states: torch.Tensor,
        controlnet_cond: List[torch.Tensor],
        conditioning_scale: List[float],
        class_labels: Optional[torch.Tensor] = None,
        timestep_cond: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        guess_mode: bool = False,
        return_dict: bool = True,
    ) -> Union[ControlNetOutput, Tuple]:
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample=sample,
                timestep=timestep,
                encoder_hidden_states=encoder_hidden_states,
                controlnet_cond=image,
                conditioning_scale=scale,
                class_labels=class_labels,
                timestep_cond=timestep_cond,
                attention_mask=attention_mask,
                cross_attention_kwargs=cross_attention_kwargs,
                guess_mode=guess_mode,
                return_dict=return_dict,
            )

            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(
        self,
        save_directory: Union[str, os.PathLike],
        is_main_process: bool = True,
        save_function: Callable = None,
        safe_serialization: bool = False,
        variant: Optional[str] = None,
    ):
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save,
                is_main_process=is_main_process,
                save_function=save_function,
                safe_serialization=safe_serialization,
                variant=variant,
            )

            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"

    @classmethod
    def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs):
        idx = 0
        controlnets = []

        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)

            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"

        logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")

        if len(controlnets) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + '_0'}."
            )

        return cls(controlnets)
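# A small sketch of the on-disk layout the loader above probes: save_pretrained
# writes the first net to the base directory and the rest to "_1", "_2", ...;
# from_pretrained walks the same sequence until a directory is missing. The base
# path here is illustrative, not from the source.
def expected_controlnet_paths(base: str, n_nets: int) -> list:
    # first net lands in `base`, the rest in `base_1`, `base_2`, ...
    return [base] + [f"{base}_{i}" for i in range(1, n_nets)]

print(expected_controlnet_paths("multi", 3))  # ['multi', 'multi_1', 'multi_2']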
| style_context_codestyle: 282 | label: 0 |
import torch

from diffusers import DDIMParallelScheduler

from .test_schedulers import SchedulerCommonTest


class DDIMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0

        model = self.dummy_model()
        sample = self.dummy_sample_deter

        scheduler.set_timesteps(num_inference_steps)

        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample

        return sample

    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3

    def test_full_loop_no_noise(self):
        sample = self.full_loop()

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3
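# Sketch: the `_get_variance` constants asserted above can be re-derived from the
# standard DDIM posterior variance, sigma^2 = (1 - abar_prev) / (1 - abar_t) *
# (1 - abar_t / abar_prev), under the test's linear beta schedule
# (beta_start=0.0001, beta_end=0.02, 1000 steps). This is an independent
# re-derivation from the DDIM paper, not a call into DDIMParallelScheduler.
import torch

betas = torch.linspace(0.0001, 0.02, 1000)
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)

def ddim_variance(t: int, prev_t: int) -> float:
    abar_t = alphas_cumprod[t]
    abar_prev = alphas_cumprod[prev_t] if prev_t >= 0 else torch.tensor(1.0)
    return ((1 - abar_prev) / (1 - abar_t) * (1 - abar_t / abar_prev)).item()

print(ddim_variance(420, 400))  # roughly 0.14771, matching the assertion above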
| code_codestyle: 333 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
    "RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
    "RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
    "RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
    "RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
    "RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}


class RwkvConfig(PretrainedConfig):
    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(
        self,
        vocab_size=50277,
        context_length=1024,
        hidden_size=4096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
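# Usage sketch: two of the fields above are derived when left unset, and the
# attribute_map aliases max_position_embeddings to context_length. Assumes only
# that RwkvConfig is importable from transformers, as the archive map suggests.
from transformers import RwkvConfig

config = RwkvConfig(hidden_size=512)
print(config.attention_hidden_size)     # 512  (falls back to hidden_size)
print(config.intermediate_size)         # 2048 (falls back to 4 * hidden_size)
print(config.max_position_embeddings)   # 1024 (aliased to context_length)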
| style_context_codestyle: 333 | label: 1 |
"""simple docstring"""
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
def a__ ( SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
lowerCAmelCase : int = torch.load(SCREAMING_SNAKE_CASE , map_location="cpu" )
if "model" in sd.keys():
lowerCAmelCase : Union[str, Any] = torch.load(SCREAMING_SNAKE_CASE , map_location="cpu" )["model"]
# pop unnecessary weights
lowerCAmelCase : Optional[Any] = [
"decoder.version",
"decoder.output_projection.weight",
]
for key in keys_to_delete:
if key in sd:
sd.pop(SCREAMING_SNAKE_CASE )
lowerCAmelCase : List[str] = {
"decoder.project_in_dim.weight": "decoder.project_in.weight",
"decoder.project_out_dim.weight": "decoder.project_out.weight",
"decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
"decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
lowerCAmelCase : Any = sd.pop(SCREAMING_SNAKE_CASE )
lowerCAmelCase : List[str] = list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
lowerCAmelCase : Dict = sd[key]
# We split QKV in separate Q,K,V
lowerCAmelCase : Optional[Any] = key.replace(".qkv_proj." , ".q_proj." )
lowerCAmelCase : str = key.replace(".qkv_proj." , ".k_proj." )
lowerCAmelCase : Tuple = key.replace(".qkv_proj." , ".v_proj." )
lowerCAmelCase : List[str] = value.shape[0]
assert depth % 3 == 0
# `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Optional[Any] = torch.split(SCREAMING_SNAKE_CASE , depth // 3 , dim=0 )
lowerCAmelCase : List[Any] = q
lowerCAmelCase : str = k
lowerCAmelCase : Any = v
del sd[key]
return sd
@torch.no_grad()
def a__ ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Tuple=None ):
'''simple docstring'''
lowerCAmelCase : Any = load_checkpoint(SCREAMING_SNAKE_CASE )
if config is not None:
lowerCAmelCase : Optional[Any] = OPTConfig.from_pretrained(SCREAMING_SNAKE_CASE )
else:
lowerCAmelCase : Optional[int] = OPTConfig()
lowerCAmelCase : Any = OPTModel(SCREAMING_SNAKE_CASE ).half().eval()
model.load_state_dict(SCREAMING_SNAKE_CASE )
# Check results
Path(SCREAMING_SNAKE_CASE ).mkdir(exist_ok=SCREAMING_SNAKE_CASE )
model.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--fairseq_path''',
type=str,
help=(
'''path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'''
''' https://huggingface.co/models?other=opt_metasq'''
),
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--hf_config''', default=None, type=str, help='''Define HF config.''')
lowerCAmelCase__ = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
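# Toy sketch of the K,V,Q split performed in load_checkpoint above: a fused
# (3 * dim, dim) projection is cut into three equal row blocks along dim 0.
# The fabricated tensor and sizes are illustrative only.
import torch

dim = 4
fused = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)

depth = fused.shape[0]
assert depth % 3 == 0
k, v, q = torch.split(fused, depth // 3, dim=0)

print(k.shape, v.shape, q.shape)                        # three (4, 4) blocks
print(torch.equal(torch.cat([k, v, q], dim=0), fused))  # True: a pure reshuffle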
| code_codestyle: 108 |
"""simple docstring"""
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| style_context_codestyle: 78 | label: 0 |
def solution(length: int = 50) -> int:
    ways_number = [1] * (length + 1)

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[row_length - tile_start - tile_length]

    return ways_number[length]


if __name__ == "__main__":
    print(f"{solution() = }")
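# A quicker equivalent reading of the table above: once tiles of length 2, 3 and 4
# may start at any offset, the counts obey the tetranacci-style recurrence
# f(n) = f(n-1) + f(n-2) + f(n-3) + f(n-4) with f(0) = f(1) = 1. This is an
# independent cross-check of solution(), not a refactor of it.
def solution_by_recurrence(length: int = 50) -> int:
    f = [1, 1, 2, 4]
    for _ in range(4, length + 1):
        f.append(f[-1] + f[-2] + f[-3] + f[-4])
    return f[length]

assert solution_by_recurrence(5) == 15  # matches the table: 1 + 8 + 4 + 2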
| code_codestyle: 231 |
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_focalnet"] = [
        "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FocalNetForImageClassification",
        "FocalNetForMaskedImageModeling",
        "FocalNetBackbone",
        "FocalNetModel",
        "FocalNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_focalnet import (
            FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FocalNetBackbone,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetModel,
            FocalNetPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
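# A minimal toy sketch of the lazy-import idea behind _LazyModule: heavy
# submodules are imported only when one of their attributes is first touched.
# This stand-in class is an illustration, not transformers' implementation.
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # maps attribute -> owning submodule, inverted from the structure dict
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        module_name = self._attr_to_module.get(attr)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        # the real submodule is imported only now, on first access
        module = importlib.import_module(f"{self.__name__}.{module_name}")
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache for subsequent lookups
        return value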
| style_context_codestyle: 231 | label: 1 |
import logging
import os
from typing import List, Tuple

import numpy as np
import psutil
import torch
import torch.distributed as dist

from transformers import RagRetriever


logger = logging.getLogger(__name__)


class RagPyTorchDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None):
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.process_group = None

    def init_retrieval(self, distributed_port: int):
        logger.info("initializing retrieval")

        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info("dist initialized")
            # needs to be set manually
            os.environ["GLOO_SOCKET_IFNAME"] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ["MASTER_PORT"] = str(distributed_port + 1)
            self.process_group = dist.new_group(ranks=None, backend="gloo")

        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info("dist not initialized / main")
            self.index.init_index()

        # all processes wait until the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group)

    def _is_main(self):
        return dist.get_rank(group=self.process_group) == 0

    def _scattered(self, scatter_as_list, target_shape, target_type=torch.float32):
        target_tensor = torch.empty(target_shape, dtype=target_type)
        dist.scatter(target_tensor, src=0, scatter_list=scatter_as_list, group=self.process_group)
        return target_tensor

    def _infer_socket_ifname(self):
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith("e")), None)
        return ifname

    def retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[np.ndarray, List[dict]]:
        # single GPU training
        if not dist.is_initialized():
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

        # distributed training
        world_size = dist.get_world_size(group=self.process_group)

        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape, dtype=torch.float32) for _ in range(world_size)]
        dist.gather(torch.tensor(question_hidden_states), dst=0, gather_list=gather_list, group=self.process_group)

        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list) == world_size
            ids, vectors = self._main_retrieve(torch.cat(gather_list).numpy(), n_docs)
            ids, vectors = torch.tensor(ids), torch.tensor(vectors)
            scatter_ids = self._chunk_tensor(ids, n_queries)
            scatter_vectors = self._chunk_tensor(vectors, n_queries)
        doc_ids = self._scattered(scatter_ids, [n_queries, n_docs], target_type=torch.int64)
        retrieved_doc_embeds = self._scattered(scatter_vectors, [n_queries, n_docs, question_hidden_states.shape[1]])

        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids)
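# Shape sketch of the gather/scatter round trip in retrieve() above, run in a
# single process with no torch.distributed calls; world size and dimensions are
# illustrative. Each worker contributes n_queries hidden states, rank 0 gathers
# and retrieves, then per-worker chunks are scattered back.
import torch

world_size, n_queries, hidden, n_docs = 4, 8, 16, 5

per_worker = [torch.randn(n_queries, hidden) for _ in range(world_size)]
gathered = torch.cat(per_worker)                               # (32, 16) on rank 0
doc_ids = torch.randint(0, 100, (gathered.shape[0], n_docs))   # stand-in retrieval

chunks = list(torch.chunk(doc_ids, world_size, dim=0))         # what _chunk_tensor yields
assert all(chunk.shape == (n_queries, n_docs) for chunk in chunks)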
| code_codestyle: 192 |
import enum
import os
from hashlib import sha256
from typing import Optional

from .. import config
from .logging import get_logger


logger = get_logger(__name__)


class VerificationMode(enum.Enum):
    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"


class ChecksumVerificationException(Exception):
    pass


class UnexpectedDownloadedFile(ChecksumVerificationException):
    pass


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    pass


class NonMatchingChecksumError(ChecksumVerificationException):
    pass


def verify_checksums(expected_checksums: Optional[dict], recorded_checksums: dict, verification_name=None):
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"
        )
    logger.info("All the checksums matched successfully" + for_verification_name)


class SplitsVerificationException(Exception):
    pass


class UnexpectedSplits(SplitsVerificationException):
    pass


class ExpectedMoreSplits(SplitsVerificationException):
    pass


class NonMatchingSplitsSizesError(SplitsVerificationException):
    pass


def verify_splits(expected_splits: Optional[dict], recorded_splits: dict):
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")


def get_size_checksum_dict(path: str, record_checksum: bool = True) -> dict:
    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}


def is_small_dataset(dataset_size):
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
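# Usage sketch of the streaming checksum above: the file is read in 1 MiB
# chunks (1 << 20 bytes), so large files never sit fully in memory. Standalone
# version against a temporary file; only stdlib modules are used.
import hashlib
import tempfile

with tempfile.NamedTemporaryFile(delete=False) as f:
    f.write(b"hello world")
    path = f.name

m = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        m.update(chunk)

print(m.hexdigest()[:16])  # b94d27b9934d3e08, the sha256 prefix of b"hello world"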
| style_context_codestyle: 137 | label: 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase : Tuple = logging.get_logger(__name__)
def a ( SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any]=False ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''vit.embeddings.cls_token'''),
('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
UpperCamelCase : Union[str, Any] = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def a ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Optional[Any]=False ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
UpperCamelCase : Tuple = ''''''
else:
UpperCamelCase : Any = '''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
UpperCamelCase : List[str] = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
UpperCamelCase : Any = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
UpperCamelCase : int = in_proj_weight[
: config.hidden_size, :
]
UpperCamelCase : str = in_proj_bias[: config.hidden_size]
UpperCamelCase : List[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCamelCase : Tuple = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
UpperCamelCase : List[Any] = in_proj_weight[
-config.hidden_size :, :
]
UpperCamelCase : List[Any] = in_proj_bias[-config.hidden_size :]
def a ( SCREAMING_SNAKE_CASE_ : int ):
"""simple docstring"""
UpperCamelCase : int = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def a ( SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = dct.pop(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[Any] = val
def a ( ):
"""simple docstring"""
UpperCamelCase : List[str] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
UpperCamelCase : Optional[int] = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw )
return im
@torch.no_grad()
def a ( SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Optional[Any]=True ):
"""simple docstring"""
UpperCamelCase : Tuple = ViTConfig()
# patch_size
if model_name[-1] == "8":
UpperCamelCase : Any = 8
# set labels if required
if not base_model:
UpperCamelCase : List[Any] = 1_0_0_0
UpperCamelCase : str = '''huggingface/label-files'''
UpperCamelCase : Any = '''imagenet-1k-id2label.json'''
UpperCamelCase : Any = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type='''dataset''' ) , '''r''' ) )
UpperCamelCase : int = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()}
UpperCamelCase : Any = idalabel
UpperCamelCase : int = {v: k for k, v in idalabel.items()}
# size of the architecture
if model_name in ["dino_vits8", "dino_vits16"]:
UpperCamelCase : Tuple = 3_8_4
UpperCamelCase : Dict = 1_5_3_6
UpperCamelCase : int = 1_2
UpperCamelCase : Optional[Any] = 6
# load original model from torch hub
UpperCamelCase : List[str] = torch.hub.load('''facebookresearch/dino:main''' , SCREAMING_SNAKE_CASE_ )
original_model.eval()
# load state_dict of original model, remove and rename some keys
UpperCamelCase : Tuple = original_model.state_dict()
if base_model:
remove_classification_head_(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Any = create_rename_keys(SCREAMING_SNAKE_CASE_ , base_model=SCREAMING_SNAKE_CASE_ )
for src, dest in rename_keys:
rename_key(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
read_in_q_k_v(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# load HuggingFace model
if base_model:
UpperCamelCase : Optional[Any] = ViTModel(SCREAMING_SNAKE_CASE_ , add_pooling_layer=SCREAMING_SNAKE_CASE_ ).eval()
else:
UpperCamelCase : List[Any] = ViTForImageClassification(SCREAMING_SNAKE_CASE_ ).eval()
model.load_state_dict(SCREAMING_SNAKE_CASE_ )
# Check outputs on an image, prepared by ViTImageProcessor
UpperCamelCase : str = ViTImageProcessor()
UpperCamelCase : Optional[Any] = image_processor(images=prepare_img() , return_tensors='''pt''' )
UpperCamelCase : Union[str, Any] = encoding['''pixel_values''']
UpperCamelCase : Union[str, Any] = model(SCREAMING_SNAKE_CASE_ )
if base_model:
UpperCamelCase : Optional[Any] = original_model(SCREAMING_SNAKE_CASE_ )
assert torch.allclose(SCREAMING_SNAKE_CASE_ , outputs.last_hidden_state[:, 0, :] , atol=1E-1 )
else:
UpperCamelCase : str = original_model(SCREAMING_SNAKE_CASE_ )
assert logits.shape == outputs.logits.shape
assert torch.allclose(SCREAMING_SNAKE_CASE_ , outputs.logits , atol=1E-3 )
Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
__UpperCAmelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="dino_vitb16",
type=str,
help="Name of the model trained with DINO you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--base_model",
action="store_true",
help="Whether to only convert the base model (no projection head weights).",
)
parser.set_defaults(base_model=True)
__UpperCAmelCase : List[str] = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
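# Toy check of the fused-projection slicing in read_in_q_k_v above: the timm
# (3 * hidden, hidden) in_proj matrix holds Q, K, V stacked as row blocks in
# that order. The hidden size here is illustrative.
import torch

hidden = 6
in_proj_weight = torch.randn(3 * hidden, hidden)

q = in_proj_weight[:hidden, :]
k = in_proj_weight[hidden : hidden * 2, :]
v = in_proj_weight[-hidden:, :]

assert torch.equal(torch.cat([q, k, v]), in_proj_weight)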
| code_codestyle: 370 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"],
"processing_mgp_str": ["MgpstrProcessor"],
"tokenization_mgp_str": ["MgpstrTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_mgp_str"] = [
"MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST",
"MgpstrModel",
"MgpstrPreTrainedModel",
"MgpstrForSceneTextRecognition",
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| style_context_codestyle: 315 | label: 0 |
'''simple docstring'''
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class snake_case :
"""simple docstring"""
def __init__( self , UpperCamelCase , UpperCamelCase = 13 , UpperCamelCase = 64 , UpperCamelCase = 2 , UpperCamelCase = 3 , UpperCamelCase = 3 , UpperCamelCase = True , UpperCamelCase = True , UpperCamelCase = 128 , UpperCamelCase=[16, 32, 64, 128] , UpperCamelCase = 7 , UpperCamelCase = 4 , UpperCamelCase = 37 , UpperCamelCase = "gelu" , UpperCamelCase = 0.1 , UpperCamelCase = 0.1 , UpperCamelCase = 10 , UpperCamelCase = 0.02 , UpperCamelCase = 2 , UpperCamelCase = 1 , UpperCamelCase = 128 , UpperCamelCase = [2, 2, 2, 2] , UpperCamelCase = 2 , UpperCamelCase = 2 , ):
"""simple docstring"""
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = image_size
lowerCamelCase_ = patch_size
lowerCamelCase_ = num_channels
lowerCamelCase_ = is_training
lowerCamelCase_ = use_labels
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_act
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = type_sequence_label_size
lowerCamelCase_ = initializer_range
lowerCamelCase_ = encoder_stride
lowerCamelCase_ = num_attention_outputs
lowerCamelCase_ = embed_dim
lowerCamelCase_ = embed_dim + 1
lowerCamelCase_ = resolution
lowerCamelCase_ = depths
lowerCamelCase_ = hidden_sizes
lowerCamelCase_ = dim
lowerCamelCase_ = mlp_expansion_ratio
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = self.get_config()
return config, pixel_values, labels
def snake_case ( self ):
"""simple docstring"""
return EfficientFormerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
lowerCamelCase_ = TFEfficientFormerModel(config=UpperCamelCase )
lowerCamelCase_ = model(UpperCamelCase , training=UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
lowerCamelCase_ = self.type_sequence_label_size
lowerCamelCase_ = TFEfficientFormerForImageClassification(UpperCamelCase )
lowerCamelCase_ = model(UpperCamelCase , labels=UpperCamelCase , training=UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCamelCase_ = 1
lowerCamelCase_ = TFEfficientFormerForImageClassification(UpperCamelCase )
lowerCamelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase_ = model(UpperCamelCase , labels=UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = self.prepare_config_and_inputs()
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = config_and_inputs
lowerCamelCase_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class snake_case ( lowercase , lowercase , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
_lowerCamelCase = (
{
"feature-extraction": TFEfficientFormerModel,
"image-classification": (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = TFEfficientFormerModelTester(self )
lowerCamelCase_ = ConfigTester(
self , config_class=UpperCamelCase , has_text_modality=UpperCamelCase , hidden_size=37 )
def snake_case ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="EfficientFormer does not use inputs_embeds" )
def snake_case ( self ):
"""simple docstring"""
pass
@unittest.skip(reason="EfficientFormer does not support input and output embeddings" )
def snake_case ( self ):
"""simple docstring"""
pass
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ = model_class(UpperCamelCase )
lowerCamelCase_ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase_ = [*signature.parameters.keys()]
lowerCamelCase_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , UpperCamelCase )
def snake_case ( self ):
"""simple docstring"""
def check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase ):
lowerCamelCase_ = model_class(UpperCamelCase )
lowerCamelCase_ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) , training=UpperCamelCase )
lowerCamelCase_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCamelCase_ = getattr(
self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(UpperCamelCase ) , UpperCamelCase )
if hasattr(self.model_tester , "encoder_seq_length" ):
lowerCamelCase_ = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , "chunk_length" ) and self.model_tester.chunk_length > 1:
lowerCamelCase_ = seq_length * self.model_tester.chunk_length
else:
lowerCamelCase_ = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
lowerCamelCase_ = outputs.decoder_hidden_states
self.asseretIsInstance(UpperCamelCase , (list, tuple) )
self.assertEqual(len(UpperCamelCase ) , UpperCamelCase )
lowerCamelCase_ = getattr(self.model_tester , "seq_length" , UpperCamelCase )
lowerCamelCase_ = getattr(self.model_tester , "decoder_seq_length" , UpperCamelCase )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ = True
check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase_ = True
check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase )
def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase=False ):
"""simple docstring"""
lowerCamelCase_ = super()._prepare_for_class(UpperCamelCase , UpperCamelCase , return_labels=UpperCamelCase )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase )
@unittest.skip(reason="EfficientFormer does not implement masked image modeling yet" )
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCamelCase )
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase )
@slow
def snake_case ( self ):
"""simple docstring"""
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = TFEfficientFormerModel.from_pretrained(UpperCamelCase )
self.assertIsNotNone(UpperCamelCase )
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ = True
lowerCamelCase_ = getattr(self.model_tester , "seq_length" , UpperCamelCase )
lowerCamelCase_ = getattr(self.model_tester , "encoder_seq_length" , UpperCamelCase )
lowerCamelCase_ = getattr(self.model_tester , "key_length" , UpperCamelCase )
lowerCamelCase_ = getattr(self.model_tester , "chunk_length" , UpperCamelCase )
if chunk_length is not None and hasattr(self.model_tester , "num_hashes" ):
lowerCamelCase_ = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
lowerCamelCase_ = True
lowerCamelCase_ = False
lowerCamelCase_ = True
lowerCamelCase_ = model_class(UpperCamelCase )
lowerCamelCase_ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) , training=UpperCamelCase )
lowerCamelCase_ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(UpperCamelCase ) , self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCamelCase_ = True
lowerCamelCase_ = model_class(UpperCamelCase )
lowerCamelCase_ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) , training=UpperCamelCase )
lowerCamelCase_ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(UpperCamelCase ) , self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def snake_case ( self ):
"""simple docstring"""
# We use a simplified version of this test for EfficientFormer because it requires training=False
# and Keras refuses to let us force that during functional construction
lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
lowerCamelCase_ = model_class(UpperCamelCase )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
lowerCamelCase_ = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=UpperCamelCase )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
lowerCamelCase_ = model(UpperCamelCase )
self.assertTrue(outputs_dict is not None )
def __snake_case ( ):
lowerCamelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class EfficientFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.0555, 0.4825, -0.0852])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_image_classification_head_with_teacher(self):
        model = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
            "snap-research/efficientformer-l1-300"
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.1312, 0.4353, -1.0499])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
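
# A quick standalone inference sketch mirroring the slow tests above (variable
# names here are illustrative, not part of the original file):
#
#   image = prepare_img()
#   processor = EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300")
#   model = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300")
#   logits = model(**processor(images=image, return_tensors="tf"), training=False).logits
#   predicted_class = int(tf.math.argmax(logits, axis=-1)[0])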
import copy
import re
class TrialShortNamer:
    PREFIX = "hp"
    DEFAULTS = {}
    NAMING_INFO = None

    @classmethod
    def set_defaults(cls, prefix, defaults):
        cls.PREFIX = prefix
        cls.DEFAULTS = defaults
        cls.build_naming_info()

    @staticmethod
    def shortname_for_word(info, word):
        if len(word) == 0:
            return ""
        short_word = None
        if any(char.isdigit() for char in word):
            raise Exception(f"Parameters should not contain numbers: '{word}' contains a number")
        if word in info["short_word"]:
            return info["short_word"][word]
        for prefix_len in range(1, len(word) + 1):
            prefix = word[:prefix_len]
            if prefix in info["reverse_short_word"]:
                continue
            else:
                short_word = prefix
                break

        if short_word is None:
            # Paranoid fallback
            def int_to_alphabetic(integer):
                s = ""
                while integer != 0:
                    s = chr(ord("A") + integer % 10) + s
                    integer //= 10
                return s

            i = 0
            while True:
                sword = word + "#" + int_to_alphabetic(i)
                if sword in info["reverse_short_word"]:
                    i += 1  # move to the next candidate; otherwise a collision would loop forever
                    continue
                else:
                    short_word = sword
                    break

        info["short_word"][word] = short_word
        info["reverse_short_word"][short_word] = word
        return short_word

    @staticmethod
    def shortname_for_key(info, param_name):
        words = param_name.split("_")
        shortname_parts = [TrialShortNamer.shortname_for_word(info, word) for word in words]
        # We try to create a separatorless short name, but if there is a collision we have to fallback
        # to a separated short name, e.g. "batch_size" -> ["batch", "size"] -> "bs" if free, else "b_s"
        separators = ["", "_"]
        for separator in separators:
            shortname = separator.join(shortname_parts)
            if shortname not in info["reverse_short_param"]:
                info["short_param"][param_name] = shortname
                info["reverse_short_param"][shortname] = param_name
                return shortname
        return param_name

    @staticmethod
    def add_new_param_name(info, param_name):
        short_name = TrialShortNamer.shortname_for_key(info, param_name)
        info["short_param"][param_name] = short_name
        info["reverse_short_param"][short_name] = param_name

    @classmethod
    def build_naming_info(cls):
        if cls.NAMING_INFO is not None:
            return
        info = {
            "short_word": {},
            "reverse_short_word": {},
            "short_param": {},
            "reverse_short_param": {},
        }
        field_keys = list(cls.DEFAULTS.keys())
        for k in field_keys:
            cls.add_new_param_name(info, k)
        cls.NAMING_INFO = info

    @classmethod
    def shortname(cls, params):
        cls.build_naming_info()
        assert cls.PREFIX is not None
        name = [copy.copy(cls.PREFIX)]
        for k, v in params.items():
            if k not in cls.DEFAULTS:
                raise Exception(f"You should provide a default value for the param name {k} with value {v}")
            if v == cls.DEFAULTS[k]:
                # The default value is not added to the name
                continue
            key = cls.NAMING_INFO["short_param"][k]
            if isinstance(v, bool):
                v = 1 if v else 0
            sep = "" if isinstance(v, (int, float)) else "-"
            e = f"{key}{sep}{v}"
            name.append(e)
        return "_".join(name)

    @classmethod
    def parse_repr(cls, repr):
        repr = repr[len(cls.PREFIX) + 1 :]
        if repr == "":
            values = []
        else:
            values = repr.split("_")
        parameters = {}
        for value in values:
            if "-" in value:
                p_k, p_v = value.split("-")
            else:
                p_k = re.sub("[0-9.]", "", value)
                p_v = float(re.sub("[^0-9.]", "", value))
            key = cls.NAMING_INFO["reverse_short_param"][p_k]
            parameters[key] = p_v
        for k in cls.DEFAULTS:
            if k not in parameters:
                parameters[k] = cls.DEFAULTS[k]
        return parameters
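
# A minimal usage sketch (the DEFAULTS values below are made up for illustration):
#
#   class RunNamer(TrialShortNamer):
#       PREFIX = "run"
#       DEFAULTS = {"learning_rate": 1e-3, "batch_size": 32}
#
#   RunNamer.shortname({"learning_rate": 1e-3, "batch_size": 64})  # -> "run_bs64"
#   RunNamer.parse_repr("run_bs64")  # -> {"batch_size": 64.0, "learning_rate": 1e-3}
#
# Only non-default values are encoded in the name; parse_repr fills defaults back in.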
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import Seq2SeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringSeq2SeqTrainer(Seq2SeqTrainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(
        self,
        eval_dataset: Optional[Dataset] = None,
        eval_examples=None,
        ignore_keys: Optional[List[str]] = None,
        metric_key_prefix: str = "eval",
        **gen_kwargs,
    ):
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs["max_length"] = (
            gen_kwargs["max_length"] if gen_kwargs.get("max_length") is not None else self.args.generation_max_length
        )
        gen_kwargs["num_beams"] = (
            gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs

        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(
        self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test", **gen_kwargs
    ):
        self._gen_kwargs = gen_kwargs.copy()

        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
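
# A rough wiring sketch (dataset and helper names are illustrative, not from this file):
#
#   trainer = QuestionAnsweringSeq2SeqTrainer(
#       model=model, args=training_args,
#       train_dataset=train_dataset, eval_dataset=eval_dataset,
#       eval_examples=eval_examples, post_process_function=post_processing_function,
#       compute_metrics=compute_metrics,
#   )
#   metrics = trainer.evaluate(max_length=30, num_beams=4)  # forwarded via **gen_kwargs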
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DanceDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DanceDiffusionPipeline
    params = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "callback",
        "latents",
        "callback_steps",
        "output_type",
        "num_images_per_prompt",
    }
    batch_params = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
    test_attention_slicing = False
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet1DModel(
            block_out_channels=(32, 32, 64),
            extra_in_channels=16,
            sample_size=512,
            sample_rate=16_000,
            in_channels=2,
            out_channels=2,
            flip_sin_to_cos=True,
            use_timestep_embedding=False,
            time_embedding_type="fourier",
            mid_block_type="UNetMidBlock1D",
            down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
            up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        )
        scheduler = IPNDMScheduler()

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 4,
        }
        return inputs
    def test_dance_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = DanceDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, components["unet"].sample_size)
        expected_slice = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dance_diffusion(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2

    def test_dance_diffusion_fp16(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k", torch_dtype=torch.float16)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
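
# A minimal generation sketch with the pipeline exercised above (mirroring the
# slow test; device placement and output handling are illustrative):
#
#   pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k").to("cuda")
#   audio = pipe(num_inference_steps=100, audio_length_in_s=4.096).audios
#   # audio has shape (batch, channels, samples), e.g. (1, 2, pipe.unet.sample_size)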
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1_000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config
    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample
        return sample
    def test_timesteps(self):
        for timesteps in [100, 500, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3

    def test_full_loop_no_noise(self):
        sample = self.full_loop()

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3
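
# Shape note for test_batch_step_no_noise above (illustrative sizes): if
# dummy_sample_deter has shape (B, C, H, W), then samples is (3, B, C, H, W) and
# timesteps is (3, B); flattening dims 0-1 hands the scheduler 3*B independent
# (sample, timestep) pairs in a single batched DDIM step.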
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)


class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"


def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    # Constant learning rate: the multiplicative factor is always 1.
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)


def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    # Ramp linearly from 0 to the base lr over `num_warmup_steps`, then stay constant.
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)


def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    # `step_rules` has the form "mult1:step1,mult2:step2,...,final_mult", e.g.
    # "1:10,0.1:20,0.01": lr multiple 1.0 before step 10, 0.1 before step 20, then 0.01.
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, steps_str = rule_str.split(":")
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)


def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    # Linear warmup followed by a linear decay to 0 at `num_training_steps`.
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    # Linear warmup followed by a cosine decay over the remaining steps.
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    # Cosine schedule with `num_cycles` hard restarts after a linear warmup.
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    # Polynomial decay from the optimizer's initial lr down to `lr_end` after warmup.
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)


TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}


def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
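
# A minimal usage sketch of the factory above (the model and step counts are
# placeholders):
#
#   optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)
#   lr_scheduler = get_scheduler(
#       "cosine", optimizer, num_warmup_steps=100, num_training_steps=1_000)
#   for step in range(1_000):
#       ...  # forward / backward / optimizer.step()
#       lr_scheduler.step()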
# Logistic Regression from scratch

# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets


def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    # mean binary cross-entropy between labels y and predicted probabilities h
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


# here alpha is the learning rate, x is the feature matrix and y is the target vector
def logistic_reg(alpha, x, y, max_iterations=70_000):
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta


if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1

    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70_000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        return sigmoid_function(
            np.dot(x, theta)
        )  # predicting the value of probability from the logistic regression algorithm

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
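
# For reference, the update implemented in logistic_reg above is plain batch
# gradient descent on the mean binary cross-entropy:
#     theta <- theta - alpha * X^T (h - y) / m,   with h = sigmoid(X theta)
# where m = y.size; log_likelihood is provided for inspection but unused by the loop.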
def max_product_subarray(numbers: list[int]) -> int:
    """Return the maximum product over all contiguous subarrays of `numbers`."""
    if not numbers:
        return 0

    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")

    max_till_now = min_till_now = max_prod = numbers[0]

    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)

        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)

    return max_prod
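
# Quick sanity checks (expected values worked out by hand):
#
#   max_product_subarray([2, 3, -2, 4])   # -> 6, from the subarray [2, 3]
#   max_product_subarray([-2, 0, -1])     # -> 0
#   max_product_subarray([-2, -3, 4])     # -> 24, the two negatives cancel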
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Create train/validation `DataLoader`s for GLUE MRPC with a bert-base-cased tokenizer."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"],
        shuffle=False,
        collate_fn=collate_fn,
        batch_size=EVAL_BATCH_SIZE,
        drop_last=(accelerator.mixed_precision == "fp8"),
    )

    return train_dataloader, eval_dataloader
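
# Padding note (illustrative numbers): with mixed precision enabled, collate_fn
# above pads each batch's longest sequence up to a multiple of 8 (16 under fp8),
# so a 61-token batch becomes 64 tokens; on TPU a fixed max_length of 128 is
# passed so batch shapes stay stable across steps.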
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
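
# Example invocations (assuming this script is saved as nlp_example.py):
#
#   python nlp_example.py                         # fp32 on CPU/GPU
#   python nlp_example.py --mixed_precision fp16  # fp16 on GPU
#
# With the defaults above (batch_size=16, MAX_GPU_BATCH_SIZE=16) no gradient
# accumulation kicks in; a batch_size of 64 would instead run 4 accumulation
# steps of 16 samples each, keeping the effective batch size at 64.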
from ..utils import DummyObject, requires_backends
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Dict:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Optional[int]:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> str:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Any:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Any:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> int:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Dict:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> int:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> int:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Dict:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> int:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> int:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Dict:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> int:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> int:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> str:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> int:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Dict:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Optional[int]:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> List[str]:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> int:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Optional[int]:
requires_backends(cls , ["torch"] )
def lowerCamelCase__ ( *__lowerCAmelCase : Union[str, Any] , **__lowerCAmelCase : Dict ):
"""simple docstring"""
requires_backends(__lowerCAmelCase , ["torch"] )
def lowerCamelCase__ ( *__lowerCAmelCase : Optional[int] , **__lowerCAmelCase : int ):
"""simple docstring"""
requires_backends(__lowerCAmelCase , ["torch"] )
def lowerCamelCase__ ( *__lowerCAmelCase : List[str] , **__lowerCAmelCase : int ):
"""simple docstring"""
requires_backends(__lowerCAmelCase , ["torch"] )
def lowerCamelCase__ ( *__lowerCAmelCase : Union[str, Any] , **__lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
requires_backends(__lowerCAmelCase , ["torch"] )
def lowerCamelCase__ ( *__lowerCAmelCase : Dict , **__lowerCAmelCase : Any ):
"""simple docstring"""
requires_backends(__lowerCAmelCase , ["torch"] )
def lowerCamelCase__ ( *__lowerCAmelCase : Optional[int] , **__lowerCAmelCase : Dict ):
"""simple docstring"""
requires_backends(__lowerCAmelCase , ["torch"] )
def lowerCamelCase__ ( *__lowerCAmelCase : int , **__lowerCAmelCase : Any ):
"""simple docstring"""
requires_backends(__lowerCAmelCase , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> int:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Tuple:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Dict:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> int:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> List[str]:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Optional[int]:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Optional[int]:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> int:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Tuple:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Optional[int]:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Any:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Optional[int]:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> str:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Optional[int]:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Dict:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> int:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Any:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Dict:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> List[Any]:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Optional[int]:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Union[str, Any]:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Optional[int]:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> int:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Any:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Optional[int]:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Dict:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Optional[int]:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Optional[int]:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Dict:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Any:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Optional[int]:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Union[str, Any]:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Any:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Dict:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> int:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Any:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Tuple:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Dict:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> str:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Dict:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> int:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> int:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Dict:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Any:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Dict:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Any:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> str:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Dict:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> int:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Tuple:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> int:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Dict:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Dict:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> int:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Tuple:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Any:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Dict:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Dict:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Optional[int]:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> str:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Dict:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
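# Note on the pattern above (hedged sketch): these placeholder classes rely on
# the DummyObject metaclass (bound here to `__a`), and every constructor and
# classmethod simply calls requires_backends(obj, ["torch"]), which raises an
# ImportError at use time when torch is not installed. This lets the package
# import cleanly without torch while failing loudly only when a torch-backed
# object is actually instantiated or loaded from a checkpoint.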
| 231
| 1
|
"""simple docstring"""
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims(tree) -> List[Tuple[int, ...]]:
    """Recursively collect the shapes of every tensor in a nested structure."""
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError('Not supported')
    return shapes
@torch.jit.ignore
def _flat_idx_to_idx(flat_idx: int, dims: Tuple[int, ...]) -> Tuple[int, ...]:
    """Convert a flat index into a multi-dimensional index for the given dims."""
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d
    return tuple(reversed(idx))
@torch.jit.ignore
def _get_minimal_slice_set(
    start: Sequence[int],
    end: Sequence[int],
    dims: Sequence[int],
    start_edges: Optional[Sequence[bool]] = None,
    end_edges: Optional[Sequence[bool]] = None,
) -> List[Tuple[slice, ...]]:
    """Produce an ordered list of tensor slices that together cover every leaf
    in the contiguous (inclusive) flat range [start, end] of a tensor with
    batch shape `dims`."""

    # start_edges/end_edges indicate whether, from each dimension on, the
    # start/end index sits at the edge of the corresponding subtree
    def reduce_edge_list(l: List[bool]) -> None:
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)

    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]

    slices = []
    path_list = []

    # Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break

    path = tuple(path_list)
    divergence_idx = len(path_list)

    # start == end, and we're done
    if divergence_idx == len(dims):
        return [path]

    def upper() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None
        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :],
                [d - 1 for d in dims[divergence_idx + 1 :]],
                dims[divergence_idx + 1 :],
                start_edges=start_edges[divergence_idx + 1 :],
                end_edges=[True for _ in end_edges[divergence_idx + 1 :]],
            )
        )

    def lower() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None
        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]],
                end[divergence_idx + 1 :],
                dims[divergence_idx + 1 :],
                start_edges=[True for _ in start_edges[divergence_idx + 1 :]],
                end_edges=end_edges[divergence_idx + 1 :],
            )
        )

    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1),))
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx]),))
        slices.extend(lower())
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper())
        slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1),))
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper())
        middle_ground = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx]),))
        slices.extend(lower())

    return slices
@torch.jit.ignore
def _chunk_slice(t: torch.Tensor, flat_start: int, flat_end: int, no_batch_dims: int) -> torch.Tensor:
    """Slice the flattened batch range [flat_start, flat_end) out of t without
    materializing a full reshape of the batch dimensions."""
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))

    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(start_idx, end_idx, batch_dims)

    sliced_tensors = [t[s] for s in slices]
    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def chunk_layer(
    layer: Callable,
    inputs: Dict[str, Any],
    chunk_size: int,
    no_batch_dims: int,
    low_mem: bool = False,
    _out: Any = None,
    _add_into_out: bool = False,
) -> Any:
    """Run `layer` piecewise over chunks of the flattened batch dimensions of
    `inputs` and reassemble the outputs, trading compute for peak memory."""
    if not (len(inputs) > 0):
        raise ValueError('Must provide at least one input')

    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t: torch.Tensor) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)

    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d

    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t: torch.Tensor) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(no_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice,
                flat_start=i,
                flat_end=min(flat_batch_dim, i + chunk_size),
                no_batch_dims=len(orig_batch_dims),
            )

        chunks = tensor_tree_map(select_chunk, prepped_inputs)

        # Run the layer on the chunk
        output_chunk = layer(**chunks)

        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)

        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):

            def assign(d1: dict, d2: dict) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]

            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError('Not supported')

        i += chunk_size

    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)
    return out
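# A minimal usage sketch for chunk_layer (hedged; the module and shapes below
# are illustrative, not from the original file):
#
#     linear = torch.nn.Linear(8, 8)
#     x = torch.randn(4, 16, 8)  # two batch dims (4, 16), feature dim 8
#     out = chunk_layer(lambda x: linear(x), {"x": x}, chunk_size=16, no_batch_dims=2)
#     # out has shape (4, 16, 8) and matches linear(x) up to floating point,
#     # but the layer was only ever applied to 16 rows at a time.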
class ChunkSizeTuner:
    def __init__(self, max_chunk_size: int = 512):
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size: Optional[int] = None
        self.cached_arg_data: Optional[tuple] = None

    def _determine_favorable_chunk_size(self, fn: Callable, args: tuple, min_chunk_size: int) -> int:
        logging.info('Tuning chunk size...')

        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size

        candidates = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size: int) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False

        # Binary search for the largest chunk size that does not run out of memory
        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2

        return candidates[min_viable_chunk_size_index]

    def _compare_arg_caches(self, ac1: Iterable, ac2: Iterable) -> bool:
        consistent = True
        for a1, a2 in zip(ac1, ac2):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2
        return consistent

    def tune_chunk_size(self, representative_fn: Callable, args: tuple, min_chunk_size: int) -> int:
        consistent = True
        arg_data = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Otherwise, we can reuse the precomputed value
            consistent = False

        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn, args, min_chunk_size
            )
            self.cached_arg_data = arg_data

        assert self.cached_chunk_size is not None
        return self.cached_chunk_size
| 367
|
from __future__ import annotations
import numpy as np
def relu(vector) -> np.ndarray:
    """Element-wise ReLU activation: max(0, x)."""
    return np.maximum(0, vector)
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 304
| 0
|
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate a polynomial (coefficients in increasing order of degree) at x."""
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate the same polynomial with Horner's method: one multiply-add per coefficient."""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result
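# Worked example of Horner's rewriting for poly = (0.0, 0.0, 5.0, 9.3, 7.0):
#     0 + 0*x + 5*x^2 + 9.3*x^3 + 7*x^4
#       = x*(x*(x*(x*7 + 9.3) + 5) + 0) + 0
# so each coefficient costs one multiply and one add, instead of recomputing
# x**i from scratch for every term as evaluate_poly does.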
if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
| 26
|
"""simple docstring"""
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = '''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{\'spearmanr\': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results[\'spearmanr\'])
-0.7
>>> print(round(results[\'spearmanr_pvalue\'], 2))
0.19
'''
_CITATION = r'''\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Spearmanr(datasets.Metric):
    """Spearman rank-order correlation metric, wrapping scipy.stats.spearmanr."""

    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('float' ),
'references': datasets.Value('float' ),
} ) , reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'] , )
    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
| 315
| 0
|
"""simple docstring"""
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator(length: int = 8) -> str:
    """Generate a random password of the given length from letters, digits
    and punctuation, using the cryptographically secure `secrets` module."""
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))


def alternative_password_generator(chars_incl: str, i: int) -> str:
    """Generate a password of length i that is guaranteed to contain chars_incl."""
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    #     random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)


# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, i: int) -> str:
    return "".join(secrets.choice(chars_incl) for _ in range(i))


def random_number(chars_incl, i):
    pass  # Put your code here...


def random_letters(chars_incl, i):
    pass  # Put your code here...


def random_characters(chars_incl, i):
    pass  # Put your code here...


def is_strong_password(password: str, min_length: int = 8) -> bool:
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False

    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)

    return upper and lower and num and spec_char
    # Passwords should contain UPPERCASE, lowercase,
    # numbers, and special characters


def main():
    max_length = int(input('Please indicate the max length of your password: ').strip())
    chars_incl = input('Please indicate the characters that must be in your password: ').strip()
    print('Password generated:', password_generator(max_length))
    print('Alternative Password generated:', alternative_password_generator(chars_incl, max_length))
    print('[If you are thinking of using this password, You better save it.]')
if __name__ == "__main__":
main()
| 356
|
"""simple docstring"""
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
test_maximum_claim_table = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]
class BankersAlgorithm:
    """Banker's algorithm: decide whether resource requests can be granted
    while keeping the system in a safe state."""

    def __init__(
        self,
        claim_vector: list[int],
        allocated_resources_table: list[list[int]],
        maximum_claim_table: list[list[int]],
    ) -> None:
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self) -> list[int]:
        """Sum the currently allocated resources, per resource type."""
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self) -> list[int]:
        """Resources still available: claim vector minus total allocation."""
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation())

    def __need(self) -> list[list[int]]:
        """Per-process remaining need: maximum claim minus current allocation."""
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self) -> dict[int, list[int]]:
        """Map each original process index to its need vector."""
        return {self.__need().index(i): i for i in self.__need()}

    def main(self, **kwargs) -> None:
        """Run the safety algorithm, printing each process as it executes."""
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print('_' * 50 + '\n')
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f'Process {process_number + 1} is executing.')
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number])
                    print(
                        'Updated available resource stack for processes: '
                        + ' '.join([str(x) for x in available_resources]))
                    break
            if safe:
                print('The process is in a safe state.\n')
            else:
                print('System in unsafe state. Aborting...\n')
                break

    def __pretty_data(self):
        """Properly align the display of the algorithm's data."""
        print(' ' * 9 + 'Allocated Resource Table')
        for item in self.__allocated_resources_table:
            print(
                f'P{self.__allocated_resources_table.index(item) + 1}'
                + ' '.join(f'{it:>8}' for it in item)
                + '\n')
        print(' ' * 9 + 'System Resource Table')
        for item in self.__maximum_claim_table:
            print(
                f'P{self.__maximum_claim_table.index(item) + 1}'
                + ' '.join(f'{it:>8}' for it in item)
                + '\n')
        print(
            'Current Usage by Active Processes: '
            + ' '.join(str(x) for x in self.__claim_vector))
        print(
            'Initial Available Resources: '
            + ' '.join(str(x) for x in self.__available_resources()))
        time.sleep(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
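    # A minimal demo run (hedged sketch) using the tables defined above;
    # `describe=True` triggers the pretty-printed resource tables:
    # BankersAlgorithm(
    #     claim_vector=test_claim_vector,
    #     allocated_resources_table=test_allocated_res_table,
    #     maximum_claim_table=test_maximum_claim_table,
    # ).main(describe=True)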
| 271
| 0
|
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
logger = logging.getLogger(__name__)
class BertEncoderWithPabee(BertEncoder):
    def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None):
        layer_outputs = self.layer[current_layer](hidden_states, attention_mask, head_mask[current_layer])
        hidden_states = layer_outputs[0]
        return hidden_states
@add_start_docstrings(
    """The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.""",
    BERT_START_DOCSTRING,
)
class BertModelWithPabee(BertModel):
    def __init__(self, config):
        super().__init__(config)

        self.encoder = BertEncoderWithPabee(config)

        self.init_weights()
        self.patience = 0
        self.inference_instances_num = 0
        self.inference_layers_num = 0

        self.regression_threshold = 0

    def set_regression_threshold(self, threshold):
        self.regression_threshold = threshold

    def set_patience(self, patience):
        self.patience = patience

    def reset_stats(self):
        self.inference_instances_num = 0
        self.inference_layers_num = 0

    def log_stats(self):
        avg_inf_layers = self.inference_layers_num / self.inference_instances_num
        message = (
            f"""*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="""
            f""" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"""
        )
        print(message)
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_dropout=None,
        output_layers=None,
        regression=False,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = embedding_output

        if self.training:
            res = []
            for i in range(self.config.num_hidden_layers):
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](output_dropout(pooled_output))
                res.append(logits)
        elif self.patience == 0:  # Use all layers for inference
            encoder_outputs = self.encoder(
                embedding_output,
                attention_mask=extended_attention_mask,
                head_mask=head_mask,
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_extended_attention_mask,
            )
            pooled_output = self.pooler(encoder_outputs[0])
            res = [output_layers[self.config.num_hidden_layers - 1](pooled_output)]
        else:
            patient_counter = 0
            patient_result = None
            calculated_layer_num = 0
            for i in range(self.config.num_hidden_layers):
                calculated_layer_num += 1
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](pooled_output)
                if regression:
                    labels = logits.detach()
                    if patient_result is not None:
                        patient_labels = patient_result.detach()
                    if (patient_result is not None) and torch.abs(patient_labels - labels) < self.regression_threshold:
                        patient_counter += 1
                    else:
                        patient_counter = 0
                else:
                    labels = logits.detach().argmax(dim=1)
                    if patient_result is not None:
                        patient_labels = patient_result.detach().argmax(dim=1)
                    if (patient_result is not None) and torch.all(labels.eq(patient_labels)):
                        patient_counter += 1
                    else:
                        patient_counter = 0

                patient_result = logits
                if patient_counter == self.patience:
                    break
            res = [patient_result]
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1

        return res
@add_start_docstrings(
    """Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """,
    BERT_START_DOCSTRING,
)
class BertForSequenceClassificationWithPabee(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.bert = BertModelWithPabee(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)]
        )

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        logits = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_dropout=self.dropout,
            output_layers=self.classifiers,
            regression=self.num_labels == 1,
        )

        outputs = (logits[-1],)

        if labels is not None:
            total_loss = None
            total_weights = 0
            for ix, logits_item in enumerate(logits):
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    loss = loss_fct(logits_item.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1))
                if total_loss is None:
                    total_loss = loss
                else:
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            outputs = (total_loss / total_weights,) + outputs

        return outputs
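# A minimal inference sketch for the patience-based early exit (hedged; the
# checkpoint path is hypothetical):
#
#     model = BertForSequenceClassificationWithPabee.from_pretrained("path/to/checkpoint")
#     model.bert.set_patience(3)   # exit once 3 consecutive layers agree
#     model.bert.reset_stats()
#     logits = model(input_ids=input_ids)[0]
#     model.bert.log_stats()       # prints the average number of layers used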
| 107
|
def join(separator: str, separated: list) -> str:
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception("join() accepts only strings to be joined")
        joined += word_or_phrase + separator
    return joined.strip(separator)
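# Example: join("-", ["a", "b", "c"]) builds "a-b-c-" and the final
# strip(separator) removes the trailing separator, returning "a-b-c".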
if __name__ == "__main__":
from doctest import testmod
testmod()
| 236
| 0
|
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class TFBlenderbotSmallModelTester:
    config_cls = BlenderbotSmallConfig
    config_updates = {}
    hidden_act = 'gelu'
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_blenderbot_small_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotSmallModel(config=config).get_decoder()
        input_ids = inputs_dict['input_ids']

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict['attention_mask'][:1, :]
        head_mask = inputs_dict['head_mask']
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
def prepare_blenderbot_small_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFBlenderbotSmallModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
    )
    all_generative_model_classes = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            'conversational': TFBlenderbotSmallForConditionalGeneration,
            'feature-extraction': TFBlenderbotSmallModel,
            'summarization': TFBlenderbotSmallForConditionalGeneration,
            'text2text-generation': TFBlenderbotSmallForConditionalGeneration,
            'translation': TFBlenderbotSmallForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFBlenderbotSmallModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotSmallConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbot90MIntegrationTests(unittest.TestCase):
    src_text = [
        "Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like "
        " i'm going to throw up.\nand why is that?"
    ]
    model_name = 'facebook/blenderbot_small-90M'
    @cached_property
    def tokenizer(self):
        return BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M')

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_90_generation_from_short_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors='tf')
        generated_ids = self.model.generate(
            model_inputs.input_ids,
            attention_mask=model_inputs.attention_mask,
            num_beams=2,
            use_cache=True,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert generated_words in (
)
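# To run just these tests from a transformers checkout (the path is an
# assumption based on the usual tests/ layout, not taken from this file):
#     python -m pytest tests/models/blenderbot_small/test_modeling_tf_blenderbot_small.py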
| 350
|
def apply_table(inp: str, table: list[int]) -> str:
    """Permute the bits of `inp` according to the 1-indexed `table`."""
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data: str) -> str:
    """Rotate a bit string one position to the left."""
    return data[1:] + data[0]


def xor(a: str, b: str) -> str:
    """Bitwise XOR of two equal-length bit strings."""
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s: list[list[int]], data: str) -> str:
    """Look up a 4-bit block in S-box `s`: outer bits pick the row, inner bits the column."""
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, s0, s1, key, message):
    """One Feistel round of S-DES."""
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right
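# S-DES at a glance: a 10-bit key is permuted (p10_table) and split; shifted
# halves are permuted again (p8_table) to derive two 8-bit round keys. The
# 8-bit message goes through an initial permutation (IP), two Feistel rounds
# with the round keys and S-boxes s0/s1 (with a half swap in between), then
# the inverse permutation (IP_inv). Decryption runs the same rounds with the
# round keys in the opposite order, as the __main__ block below demonstrates.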
if __name__ == "__main__":
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")

    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)

    # decryption
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decrypting is:", PT)
| 330
| 0
|
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class JsonConfig(datasets.BuilderConfig):
    """BuilderConfig for JSON."""

    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None
class Json(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = JsonConfig

    def _info(self):
        if self.config.block_size is not None:
            logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead")
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore."
            )
        if self.config.newlines_in_values is not None:
            raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported")
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in data_files."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=type))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table
    def _generate_tables(self, files):
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                    dataset = json.load(f)

                # We keep only the field we are interested in
                dataset = dataset[self.config.field]

                # We accept two format: a list of dicts or a dict of lists
                if isinstance(dataset, (list, tuple)):
                    keys = set().union(*[row.keys() for row in dataset])
                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping)
                yield file_idx, self._cast_table(pa_table)

            # If the file has one json object per line
            else:
                with open(file, "rb") as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32, 16 << 10)
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else 'strict'
                    )
                    while True:
                        batch = f.read(self.config.chunksize)
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f)
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("utf-8")
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size)
                                    )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e, pa.ArrowInvalid)
                                        and "straddling" not in str(e)
                                        or block_size > len(batch)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}."
                                        )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file, encoding=self.config.encoding, errors=self.config.encoding_errors
                                ) as f:
                                    dataset = json.load(f)
                            except json.JSONDecodeError:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset, list):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset])
                                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                    raise ValueError(f"Not able to read records in the JSON file at {file}.") from None
                                yield file_idx, self._cast_table(pa_table)
                                break
                            else:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise ValueError(
                                    f"Not able to read records in the JSON file at {file}. "
                                    f"You should probably indicate the field of the JSON file containing your records. "
                                    f"This JSON file contain the following fields: {str(list(dataset.keys()))}. "
                                    f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. "
                                ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table)
                        batch_idx += 1
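# Typical entry point (hedged sketch): this builder is what backs
# `load_dataset("json", ...)` in the `datasets` library, e.g.
#
#     from datasets import load_dataset
#     ds = load_dataset("json", data_files="data.jsonl")
#     # or, for a single JSON document with records nested under a key:
#     ds = load_dataset("json", data_files="data.json", field="data")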
| 26
|
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith('module.encoder'):
            key = key.replace('module.encoder', 'glpn.encoder')
        if key.startswith('module.decoder'):
            key = key.replace('module.decoder', 'decoder.stages')
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find('patch_embed') + len('patch_embed')]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace('norm', 'layer_norm')
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find('glpn.encoder.layer_norm') + len('glpn.encoder.layer_norm')]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace('layer_norm1', 'layer_norm_1')
        if "layer_norm2" in key:
            key = key.replace('layer_norm2', 'layer_norm_2')
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find('block') + len('block')]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace('attn.q', 'attention.self.query')
        if "attn.proj" in key:
            key = key.replace('attn.proj', 'attention.output.dense')
        if "attn" in key:
            key = key.replace('attn', 'attention.self')
        if "fc1" in key:
            key = key.replace('fc1', 'dense1')
        if "fc2" in key:
            key = key.replace('fc2', 'dense2')
        if "linear_pred" in key:
            key = key.replace('linear_pred', 'classifier')
        if "linear_fuse" in key:
            key = key.replace('linear_fuse.conv', 'linear_fuse')
            key = key.replace('linear_fuse.bn', 'batch_norm')
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find('linear_c') + len('linear_c')]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if "bot_conv" in key:
            key = key.replace('bot_conv', '0.convolution')
        if "skip_conv1" in key:
            key = key.replace('skip_conv1', '1.convolution')
        if "skip_conv2" in key:
            key = key.replace('skip_conv2', '2.convolution')
        if "fusion1" in key:
            key = key.replace('fusion1', '1.fusion')
        if "fusion2" in key:
            key = key.replace('fusion2', '2.fusion')
        if "fusion3" in key:
            key = key.replace('fusion3', '3.fusion')
        if "fusion" in key and "conv" in key:
            key = key.replace('conv', 'convolutional_layer')
        if key.startswith('module.last_layer_depth'):
            key = key.replace('module.last_layer_depth', 'head.head')
        new_state_dict[key] = value

    return new_state_dict
def read_in_k_v(state_dict, config):
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    # load GLPN configuration
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])
    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()
    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors='pt').pixel_values
    logger.info('Converting model...')
    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device('cpu'))
    # rename keys
    state_dict = rename_keys(state_dict)
    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)
    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth
    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]])
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]])
        else:
            raise ValueError(f"""Unknown model name: {model_name}""")
        expected_shape = torch.Size([1, 480, 640])
        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print('Looks ok!')
    # finally, push to hub if required
    if push_to_hub:
        logger.info('Pushing model and image processor to the hub...')
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization='nielsr',
            commit_message='Add model',
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization='nielsr',
            commit_message='Add image processor',
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""",
default=None,
type=str,
help="""Path to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
parser.add_argument(
"""--model_name""",
default="""glpn-kitti""",
type=str,
help="""Name of the model in case you're pushing to the hub.""",
)
    args = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 50
| 0
|
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
    def _get_uniform_logits(self, batch_size: int, length: int):
        '''simple docstring'''
        scores = jnp.ones((batch_size, length)) / length
        return scores

    def test_temperature_dist_warper(self):
        '''simple docstring'''
        input_ids = None
        length = 20
        scores = self._get_uniform_logits(batch_size=2, length=length)
        # tweak scores to not be uniform anymore
        scores = scores.at[1, 5].set((1 / length) + 0.1)  # peak, 1st batch
        scores = scores.at[1, 10].set((1 / length) - 0.4)  # valley, 1st batch
        # compute softmax
        probs = jax.nn.softmax(scores, axis=-1)
        temp_dist_warper_sharper = FlaxTemperatureLogitsWarper(temperature=0.5)
        temp_dist_warper_smoother = FlaxTemperatureLogitsWarper(temperature=1.3)
        warped_prob_sharp = jax.nn.softmax(temp_dist_warper_sharper(input_ids, scores.copy(), cur_len=None), axis=-1)
        warped_prob_smooth = jax.nn.softmax(temp_dist_warper_smoother(input_ids, scores.copy(), cur_len=None), axis=-1)
        # uniform distribution stays uniform
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_sharp[0, :], atol=1e-3))
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_smooth[0, :], atol=1e-3))
        # sharp peaks get higher, valleys get lower
        self.assertLess(probs[1, :].max(), warped_prob_sharp[1, :].max())
        self.assertGreater(probs[1, :].min(), warped_prob_sharp[1, :].min())
        # smooth peaks get lower, valleys get higher
        self.assertGreater(probs[1, :].max(), warped_prob_smooth[1, :].max())
        self.assertLess(probs[1, :].min(), warped_prob_smooth[1, :].min())
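
    # Hedged sketch (illustrative, not the library source) of what temperature warping
    # does: dividing logits by a temperature T sharpens (T < 1) or flattens (T > 1)
    # the resulting softmax distribution:
    #
    #   def _demo_temperature_warp(scores, temperature):
    #       return scores / temperature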
    def test_top_k_dist_warper(self):
        '''simple docstring'''
        input_ids = None
        vocab_size = 10
        batch_size = 2
        # create ramp distribution
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy()
        ramp_logits[1:, : vocab_size // 2] = ramp_logits[1:, : vocab_size // 2] + vocab_size
        top_k_warp = FlaxTopKLogitsWarper(3)
        scores = top_k_warp(input_ids, ramp_logits, cur_len=None)
        # check that correct tokens are filtered
        self.assertListEqual(jnp.isinf(scores[0]).tolist(), 7 * [True] + 3 * [False])
        self.assertListEqual(jnp.isinf(scores[1]).tolist(), 2 * [True] + 3 * [False] + 5 * [True])
        # check special case
        length = 5
        top_k_warp_safety_check = FlaxTopKLogitsWarper(top_k=1, filter_value=0.0, min_tokens_to_keep=3)
        logits = np.broadcast_to(np.arange(length)[None, :], (batch_size, length)).copy()
        scores = top_k_warp_safety_check(input_ids, logits, cur_len=None)
        # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
        self.assertListEqual((scores == 0.0).sum(axis=-1).tolist(), [2, 2])
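
    # Hedged sketch of plain top-k filtering (illustrative only; the real
    # FlaxTopKLogitsWarper also honors min_tokens_to_keep, as tested above):
    #
    #   def _demo_top_k_filter(scores, k, filter_value=-float("inf")):
    #       kth_largest = jnp.sort(scores, axis=-1)[:, -k][:, None]  # per-row threshold
    #       return jnp.where(scores < kth_largest, filter_value, scores)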
    def test_top_p_dist_warper(self):
        '''simple docstring'''
        input_ids = None
        vocab_size = 10
        batch_size = 2
        # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
        dist = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]]))
        top_p_warp = FlaxTopPLogitsWarper(0.8)
        filtered_dist = np.exp(top_p_warp(input_ids, dist, cur_len=None))
        # dist should be filtered to keep min num values so that sum is >= top_p
        # exp (-inf) => 0
        EXPECTED_FILTERED_DIST = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]])
        self.assertTrue(np.allclose(filtered_dist, EXPECTED_FILTERED_DIST, atol=1e-3))
        # check edge cases with negative and extreme logits
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy() - (
            vocab_size // 2
        )
        # make ramp_logits more extreme
        ramp_logits[1] = ramp_logits[1] * 100.0
        # make sure at least 2 tokens are kept
        top_p_warp = FlaxTopPLogitsWarper(0.9, min_tokens_to_keep=2, filter_value=0.0)
        filtered_dist = top_p_warp(input_ids, ramp_logits, cur_len=None)
        # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
        self.assertListEqual((filtered_dist != 0.0).sum(axis=-1).tolist(), [3, 2])
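
    # Hedged sketch of nucleus (top-p) filtering on probabilities (illustrative only;
    # the real FlaxTopPLogitsWarper works on logits and honors min_tokens_to_keep):
    #
    #   def _demo_top_p_filter(probs, top_p):
    #       order = np.argsort(probs, axis=-1)[:, ::-1]          # descending indices
    #       sorted_probs = np.take_along_axis(probs, order, axis=-1)
    #       exclusive_cumsum = np.cumsum(sorted_probs, axis=-1) - sorted_probs
    #       keep_sorted = exclusive_cumsum < top_p               # minimal prefix reaching top_p
    #       keep = np.zeros_like(keep_sorted)
    #       np.put_along_axis(keep, order, keep_sorted, axis=-1) # undo the sort
    #       return np.where(keep, probs, 0.0)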
    def test_min_length_dist_processor(self):
        '''simple docstring'''
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0
        min_dist_processor = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        # check that min length is applied at length 5
        input_ids = ids_tensor((batch_size, 20), vocab_size=20)
        cur_len = 5
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist(), 4 * [-float('''inf''')])
        # check that min length is not applied anymore at length 15
        scores = self._get_uniform_logits(batch_size, vocab_size)
        cur_len = 15
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores_before_min_length).any())
    def test_forced_bos_token_logits_processor(self):
        '''simple docstring'''
        vocab_size = 20
        batch_size = 4
        bos_token_id = 0
        logits_processor = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        # check that all scores are -inf except the bos_token_id score
        input_ids = ids_tensor((batch_size, 1), vocab_size=20)
        cur_len = 1
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, bos_token_id].tolist(), 4 * [0])  # score for bos_token_id should be zero
        # check that bos_token_id is not forced if current length is greater than 1
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())
    def test_forced_eos_token_logits_processor(self):
        '''simple docstring'''
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0
        max_length = 5
        logits_processor = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)
        # check that all scores are -inf except the eos_token_id when max_length is reached
        input_ids = ids_tensor((batch_size, 4), vocab_size=20)
        cur_len = 4
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, eos_token_id].tolist(), 4 * [0])  # score for eos_token_id should be zero
        # check that eos_token_id is not forced if max_length is not reached
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())
    def test_processor_list(self):
        '''simple docstring'''
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15
        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()
        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)
        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)
        cur_len = 10
        # no processor list
        scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
        scores = top_k_warp(input_ids, scores, cur_len=cur_len)
        scores = top_p_warp(input_ids, scores, cur_len=cur_len)
        scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)
        # with processor list
        processor = FlaxLogitsProcessorList(
            [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc])
        scores_comp = processor(input_ids, scores_comp, cur_len=cur_len)
        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))
        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
    def test_processor_list_jitted(self):
        '''simple docstring'''
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15
        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()
        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)
        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)
        cur_len = 10

        # no processor list
        def run_no_processor_list(input_ids, scores, cur_len):
            scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
            scores = top_k_warp(input_ids, scores, cur_len=cur_len)
            scores = top_p_warp(input_ids, scores, cur_len=cur_len)
            scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)
            return scores

        # with processor list
        def run_processor_list(input_ids, scores, cur_len):
            processor = FlaxLogitsProcessorList(
                [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc])
            scores = processor(input_ids, scores, cur_len=cur_len)
            return scores

        jitted_run_no_processor_list = jax.jit(run_no_processor_list)
        jitted_run_processor_list = jax.jit(run_processor_list)
        scores = jitted_run_no_processor_list(input_ids, scores, cur_len)
        scores_comp = jitted_run_processor_list(input_ids, scores_comp, cur_len)
        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))
        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
| 231
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowerCamelCase : Union[str, Any] = """▁"""
_lowerCamelCase : Optional[Any] = {"""vocab_file""": """spiece.model"""}
_lowerCamelCase : str = {
"""vocab_file""": {"""google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"""}
}
_lowerCamelCase : List[str] = {
"""google/pegasus-xsum""": 512,
}
_lowerCamelCase : int = logging.get_logger(__name__)
class PegasusTokenizer(PreTrainedTokenizer):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
    def __init__( self , vocab_file , pad_token="<pad>" , eos_token="</s>" , unk_token="<unk>" , mask_token="<mask_2>" , mask_token_sent="<mask_1>" , additional_special_tokens=None , offset=103 , sp_model_kwargs: Optional[Dict[str, Any]] = None , **kwargs , ) ->None:
        '''simple docstring'''
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"""additional_special_tokens should be of type {type(list)}, but is"""
                    f""" {type(additional_special_tokens)}""")
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"""<unk_{i}>""" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]
            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    '''Please make sure that the provided additional_special_tokens do not contain an incorrectly'''
                    f""" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.""")
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"""<unk_{i}>""" for i in range(2, self.offset)]
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token , unk_token=unk_token , mask_token=mask_token , pad_token=pad_token , mask_token_sent=mask_token_sent , offset=offset , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.mask_token_sent = mask_token_sent
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
        # add special tokens to encoder dict
        self.encoder = {
            0: self.pad_token,
            1: self.eos_token,
        }
        if self.mask_token_sent is not None:
            self.encoder.update(
                {
                    2: self.mask_token_sent,
                    3: self.mask_token,
                })
        if self.offset > 0:
            # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
            # mask_token_sent is already added to list -> so start at 1
            self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1, self.offset - 1)})
        self.decoder = {v: k for k, v in self.encoder.items()}
    @property
    def vocab_size( self ) ->int:
        '''simple docstring'''
        return len(self.sp_model) + self.offset

    def get_vocab( self ) ->Dict[str, int]:
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__( self ) ->Dict:
        '''simple docstring'''
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        return state

    def __setstate__( self , d ) ->None:
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, '''sp_model_kwargs'''):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def _tokenize( self , text: str ) ->List[str]:
        '''simple docstring'''
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id( self , token: str ) ->int:
        '''simple docstring'''
        if token in self.decoder:
            return self.decoder[token]
        elif token in self.added_tokens_decoder:
            return self.added_tokens_decoder[token]
        sp_id = self.sp_model.piece_to_id(token)
        return sp_id + self.offset

    def _convert_id_to_token( self , index: int ) ->str:
        '''simple docstring'''
        if index in self.encoder:
            return self.encoder[index]
        elif index in self.added_tokens_encoder:
            return self.added_tokens_encoder[index]
        else:
            token = self.sp_model.IdToPiece(index - self.offset)
            return token

    def convert_tokens_to_string( self , tokens ) ->str:
        '''simple docstring'''
        current_sub_tokens = []
        out_string = ''''''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def num_special_tokens_to_add( self , pair=False ) ->int:
        '''simple docstring'''
        return 1

    def _special_token_mask( self , seq ) ->List[int]:
        '''simple docstring'''
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special
        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask( self , token_ids_0: List , token_ids_1: Optional[List] = None , already_has_special_tokens: bool = False ) ->List[int]:
        '''simple docstring'''
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ) ->List[int]:
        '''simple docstring'''
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) ->Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, '''wb''') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
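

# Hedged sketch of the id offset scheme used above: ids 0..offset-1 of the HF vocab
# are reserved for special tokens, and every raw sentencepiece id is shifted up by
# `offset`. The toy vocab below is illustrative, not the real Pegasus model.
def _demo_offset_mapping(offset: int = 103) -> None:
    sp_vocab = {"▁the": 0, "▁quick": 1}  # pretend sentencepiece ids
    token_to_hf_id = {tok: sp_id + offset for tok, sp_id in sp_vocab.items()}
    assert token_to_hf_id["▁quick"] == 1 + offset
    # converting back simply subtracts the offset again
    assert token_to_hf_id["▁the"] - offset == sp_vocab["▁the"]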
| 231
| 1
|
from manim import *
class SCREAMING_SNAKE_CASE__ ( lowercase__ ):
def SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]:
a_ : Optional[int] = Rectangle(height=0.5 , width=0.5 )
a_ : List[Any] = Rectangle(height=0.25 , width=0.25 )
a_ : Optional[Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
a_ : str = [mem.copy() for i in range(6 )]
a_ : Tuple = [mem.copy() for i in range(6 )]
a_ : Any = VGroup(*SCREAMING_SNAKE_CASE__ ).arrange(SCREAMING_SNAKE_CASE__ , buff=0 )
a_ : int = VGroup(*SCREAMING_SNAKE_CASE__ ).arrange(SCREAMING_SNAKE_CASE__ , buff=0 )
a_ : Optional[Any] = VGroup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).arrange(SCREAMING_SNAKE_CASE__ , buff=0 )
a_ : Optional[Any] = Text('CPU' , font_size=2_4 )
a_ : Any = Group(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).arrange(SCREAMING_SNAKE_CASE__ , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(SCREAMING_SNAKE_CASE__ )
a_ : Union[str, Any] = [mem.copy() for i in range(4 )]
a_ : List[Any] = VGroup(*SCREAMING_SNAKE_CASE__ ).arrange(SCREAMING_SNAKE_CASE__ , buff=0 )
a_ : Any = Text('GPU' , font_size=2_4 )
a_ : Optional[Any] = Group(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).arrange(SCREAMING_SNAKE_CASE__ , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE__ )
gpu.move_to([-1, -1, 0] )
self.add(SCREAMING_SNAKE_CASE__ )
a_ : Optional[Any] = [mem.copy() for i in range(6 )]
a_ : List[Any] = VGroup(*SCREAMING_SNAKE_CASE__ ).arrange(SCREAMING_SNAKE_CASE__ , buff=0 )
a_ : List[str] = Text('Model' , font_size=2_4 )
a_ : int = Group(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).arrange(SCREAMING_SNAKE_CASE__ , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE__ )
model.move_to([3, -1.0, 0] )
self.add(SCREAMING_SNAKE_CASE__ )
a_ : Dict = []
a_ : str = []
a_ : int = []
for i, rect in enumerate(SCREAMING_SNAKE_CASE__ ):
rect.set_stroke(SCREAMING_SNAKE_CASE__ )
a_ : int = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(SCREAMING_SNAKE_CASE__ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=SCREAMING_SNAKE_CASE__ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=SCREAMING_SNAKE_CASE__ , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=SCREAMING_SNAKE_CASE__ , buff=0.0 )
self.add(SCREAMING_SNAKE_CASE__ )
model_cpu_arr.append(SCREAMING_SNAKE_CASE__ )
self.add(*SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ )
a_ : Tuple = [mem.copy() for i in range(6 )]
a_ : Union[str, Any] = VGroup(*SCREAMING_SNAKE_CASE__ ).arrange(SCREAMING_SNAKE_CASE__ , buff=0 )
a_ : Dict = Text('Loaded Checkpoint' , font_size=2_4 )
a_ : str = Group(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).arrange(SCREAMING_SNAKE_CASE__ , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE__ )
checkpoint.move_to([3, 0.5, 0] )
self.add(SCREAMING_SNAKE_CASE__ )
a_ : Dict = []
a_ : Optional[int] = []
for i, rect in enumerate(SCREAMING_SNAKE_CASE__ ):
a_ : Union[str, Any] = fill.copy().set_fill(SCREAMING_SNAKE_CASE__ , opacity=0.7 )
target.move_to(SCREAMING_SNAKE_CASE__ )
ckpt_arr.append(SCREAMING_SNAKE_CASE__ )
a_ : List[str] = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(SCREAMING_SNAKE_CASE__ )
self.add(*SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ )
a_ : List[str] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
a_ : Optional[Any] = MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=1_8 , )
key_text.move_to([-5, 2.4, 0] )
self.add(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
a_ : Optional[int] = MarkupText(
F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=1_8 , )
blue_text.next_to(SCREAMING_SNAKE_CASE__ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(SCREAMING_SNAKE_CASE__ )
a_ : str = MarkupText(
F"""Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.""" , font_size=2_4 , )
step_a.move_to([2, 2, 0] )
a_ : List[Any] = [meta_mem.copy() for i in range(6 )]
a_ : Optional[Any] = [meta_mem.copy() for i in range(6 )]
a_ : int = VGroup(*SCREAMING_SNAKE_CASE__ ).arrange(SCREAMING_SNAKE_CASE__ , buff=0 )
a_ : Optional[int] = VGroup(*SCREAMING_SNAKE_CASE__ ).arrange(SCREAMING_SNAKE_CASE__ , buff=0 )
a_ : Tuple = VGroup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).arrange(SCREAMING_SNAKE_CASE__ , buff=0 )
a_ : Dict = Text('Disk' , font_size=2_4 )
a_ : Optional[Any] = Group(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).arrange(SCREAMING_SNAKE_CASE__ , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE__ )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(SCREAMING_SNAKE_CASE__ , run_time=3 ) , Write(SCREAMING_SNAKE_CASE__ , run_time=1 ) , Create(SCREAMING_SNAKE_CASE__ , run_time=1 ) )
a_ : List[Any] = []
for i, rect in enumerate(SCREAMING_SNAKE_CASE__ ):
a_ : List[str] = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(SCREAMING_SNAKE_CASE__ , run_time=1.5 ) )
self.play(*SCREAMING_SNAKE_CASE__ )
self.play(FadeOut(SCREAMING_SNAKE_CASE__ ) )
a_ : Optional[Any] = MarkupText(F"""Then, the checkpoint is removed from memory\nthrough garbage collection.""" , font_size=2_4 )
step_a.move_to([2, 2, 0] )
self.play(Write(SCREAMING_SNAKE_CASE__ , run_time=3 ) )
self.play(
FadeOut(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ ) , )
self.wait()
| 32
|
'''simple docstring'''
def gray_code(bit_count: int) -> list:
    # bit count represents no. of bits in the gray code
    if bit_count < 0:
        raise ValueError('''The given input must be non-negative''')
    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)
    # convert them to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)
    return sequence
def gray_code_sequence_string(bit_count: int) -> list:
    # The approach is a recursive one
    # Base case achieved when either n = 0 or n = 1
    if bit_count == 0:
        return ["0"]
    if bit_count == 1:
        return ["0", "1"]
    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1 << n is equivalent to 2^n
    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)
    sequence = []
    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        generated_no = '''0''' + smaller_sequence[i]
        sequence.append(generated_no)
    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        generated_no = '''1''' + smaller_sequence[i]
        sequence.append(generated_no)
    return sequence
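

# Hedged alternative (illustrative): the i-th Gray code can also be computed directly
# as i ^ (i >> 1), avoiding the recursion above.
def gray_code_iterative(bit_count: int) -> list:
    return [i ^ (i >> 1) for i in range(1 << bit_count)]
# e.g. gray_code_iterative(2) == [0, 1, 3, 2], matching gray_code(2)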
if __name__ == "__main__":
import doctest
doctest.testmod()
| 304
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_pegasus_x""": ["""PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PegasusXConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_pegasus_x"""] = [
"""PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PegasusXForConditionalGeneration""",
"""PegasusXModel""",
"""PegasusXPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
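
# Hedged sketch (illustrative, not the transformers implementation) of the lazy-import
# pattern above: a module-level __getattr__ can defer the heavy torch import until an
# attribute is actually requested.
#
#   import importlib
#
#   def __getattr__(name):
#       module = importlib.import_module(".modeling_pegasus_x", __name__)
#       return getattr(module, name)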
| 214
|
'''simple docstring'''
def solution():
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1901
    sundays = 0
    while year < 2001:
        day += 7
        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]
        if month > 12:
            year += 1
            month = 1
        if year < 2001 and day == 1:
            sundays += 1
    return sundays
if __name__ == "__main__":
print(solution())
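    # Hedged cross-check (not part of the original solution): the standard library
    # agrees with the hand-rolled calendar walk above.
    import datetime

    assert solution() == sum(
        datetime.date(year, month, 1).weekday() == 6
        for year in range(1901, 2001)
        for month in range(1, 13)
    )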
| 214
| 1
|
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ) -> None:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def create_and_check_openai_gpt_model( self , config , input_ids , head_mask , token_type_ids , *args ):
        '''simple docstring'''
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head_model( self , config , input_ids , head_mask , token_type_ids , *args ):
        '''simple docstring'''
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_double_lm_head_model( self , config , input_ids , head_mask , token_type_ids , *args ):
        '''simple docstring'''
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_openai_gpt_for_sequence_classification( self , config , input_ids , head_mask , token_type_ids , *args ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            """input_ids""": input_ids,
            """token_type_ids""": token_type_ids,
            """head_mask""": head_mask,
        }
        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            'feature-extraction': OpenAIGPTModel,
            'text-classification': OpenAIGPTForSequenceClassification,
            'text-generation': OpenAIGPTLMHeadModel,
            'zero-shot': OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(
        self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ) -> bool:
        '''simple docstring'''
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True
        return False

    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ) -> dict:
        '''simple docstring'''
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["""labels"""] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=torch_device , )
                inputs_dict["""input_ids"""] = inputs_dict["""labels"""]
                inputs_dict["""token_type_ids"""] = inputs_dict["""labels"""]
                inputs_dict["""mc_token_ids"""] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=torch_device , )
                inputs_dict["""mc_labels"""] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config( self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def test_openai_gpt_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class OpenAIGPTModelLanguageGenerationTest( unittest.TestCase ):
    @slow
    def test_lm_generate_openai_gpt( self ):
        '''simple docstring'''
        model = OpenAIGPTLMHeadModel.from_pretrained("""openai-gpt""")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481, 4735, 544, 246, 963, 870, 762, 239, 244, 40477,
            244, 249, 719, 881, 487, 544, 240, 244, 603, 481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
| 109
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
T = TypeVar("""T""")


class Node( Generic[T] ):
    """simple docstring"""
    def __init__( self , data: T ):
        '''simple docstring'''
        self.data = data
        self.next: Node[T] | None = None

    def __str__( self ):
        '''simple docstring'''
        return F"""{self.data}"""


class Stack( Generic[T] ):
    """simple docstring"""
    def __init__( self ):
        '''simple docstring'''
        self.top: Node[T] | None = None

    def __iter__( self ):
        '''simple docstring'''
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__( self ):
        '''simple docstring'''
        return "->".join([str(item) for item in self])

    def __len__( self ):
        '''simple docstring'''
        return len(tuple(iter(self)))

    def is_empty( self ):
        '''simple docstring'''
        return self.top is None

    def push( self , item: T ):
        '''simple docstring'''
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop( self ):
        '''simple docstring'''
        if self.is_empty():
            raise IndexError('pop from empty stack')
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek( self ):
        '''simple docstring'''
        if self.is_empty():
            raise IndexError('peek from empty stack')
        assert self.top is not None
        return self.top.data

    def clear( self ):
        '''simple docstring'''
        self.top = None
if __name__ == "__main__":
from doctest import testmod
testmod()
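    # Hedged usage sketch (illustrative): LIFO order of the linked-list stack above.
    stack: Stack[int] = Stack()
    for value in (1, 2, 3):
        stack.push(value)
    assert str(stack) == "3->2->1"
    assert stack.pop() == 3 and stack.peek() == 2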
| 271
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
CVT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''microsoft/cvt-13''': '''https://huggingface.co/microsoft/cvt-13/resolve/main/config.json''',
    # See all Cvt models at https://huggingface.co/models?filter=cvt
}
class CvtConfig( PretrainedConfig ):
    model_type = 'cvt'

    def __init__( self , num_channels=3 , patch_sizes=[7, 3, 3] , patch_stride=[4, 2, 2] , patch_padding=[2, 1, 1] , embed_dim=[64, 192, 384] , num_heads=[1, 3, 6] , depth=[1, 2, 10] , mlp_ratio=[4.0, 4.0, 4.0] , attention_drop_rate=[0.0, 0.0, 0.0] , drop_rate=[0.0, 0.0, 0.0] , drop_path_rate=[0.0, 0.0, 0.1] , qkv_bias=[True, True, True] , cls_token=[False, False, True] , qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"] , kernel_qkv=[3, 3, 3] , padding_kv=[1, 1, 1] , stride_kv=[2, 2, 2] , padding_q=[1, 1, 1] , stride_q=[1, 1, 1] , initializer_range=0.02 , layer_norm_eps=1e-12 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
| 365
|
"""simple docstring"""
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
    def __init__( self , parent , batch_size=2 , seq_length=8 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=16 , num_hidden_layers=5 , num_attention_heads=2 , intermediate_size=36 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
        '''simple docstring'''
        return MraConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
    def get_pipeline_config( self ):
        '''simple docstring'''
        config = self.get_config()
        config.vocab_size = 300
        return config
    def prepare_config_and_inputs_for_decoder( self ):
        '''simple docstring'''
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = MraModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        '''simple docstring'''
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , )
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , encoder_hidden_states=encoder_hidden_states , )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = MraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = MraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class MraModelTest( ModelTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)

    def test_config( self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def test_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="""MRA does not output attentions""")
    def test_attention_outputs( self ):
        '''simple docstring'''
        return
@require_torch
class MraModelIntegrationTest( unittest.TestCase ):
    @slow
    def test_inference_no_head( self ):
        '''simple docstring'''
        model = MraModel.from_pretrained("""uw-madison/mra-base-512-4""")
        input_ids = torch.arange(256).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]])
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm( self ):
        '''simple docstring'''
        model = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-512-4""")
        input_ids = torch.arange(256).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        vocab_size = 5_0265
        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]])
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm_long_input( self ):
        '''simple docstring'''
        model = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-4096-8-d3""")
        input_ids = torch.arange(4096).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        vocab_size = 5_0265
        expected_shape = torch.Size((1, 4096, vocab_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]])
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 296
| 0
|
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError('''To use the rich extension, install rich with `pip install rich`''')
| 340
|
from __future__ import annotations
from cmath import sqrt
def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    """simple docstring"""
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c
    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)
    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main() -> None:
    """simple docstring"""
    solution_1, solution_2 = quadratic_roots(a=5, b=6, c=1)
    print(F"The solutions are: {solution_1} and {solution_2}")
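

# Hedged sanity check (illustrative, hypothetical helper): for x^2 - 2x + 1 the double
# root is 1.0, and for x^2 + 1 the roots are the complex conjugates ±1j.
def _demo_quadratic_roots() -> None:
    assert quadratic_roots(a=1, b=-2, c=1) == (1.0, 1.0)
    root_1, root_2 = quadratic_roots(a=1, b=0, c=1)
    assert root_1 == 1j and root_2 == -1j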
if __name__ == "__main__":
main()
| 340
| 1
|
import doctest
from collections import deque
import numpy as np
class CircularConvolution:
    def __init__( self ):
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution( self ):
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)
        max_length = max(length_first_signal, length_second_signal)
        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]
        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)
        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item
        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))
        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
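# Usage sketch (assumes the class above as-is): the circular convolution of the
# default signals [2, 1, 2, -1] and [1, 2, 3, 4] is [10.0, 10.0, 6.0, 14.0].
if __name__ == "__main__":
    print(CircularConvolution().circular_convolution())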
| 169
|
import math
def malus_law(initial_intensity: float, angle: float) -> float:
    # handling of negative values of initial intensity
    if initial_intensity < 0:
        raise ValueError('The value of intensity cannot be negative')
    # handling of values out of allowed range
    if angle < 0 or angle > 360:
        raise ValueError('In Malus Law, the angle is in the range 0-360 degrees')
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name="""malus_law""")
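# Usage sketch: an ideal polarizer at 60 degrees passes cos^2(60°) = 25% of the light.
if __name__ == "__main__":
    print(malus_law(100.0, 60.0))  # ~25.0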
| 169
| 1
|
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def get_xclip_config(model_name, num_frames):
    text_config = XCLIPTextConfig()
    # derive patch size from model name
    start_idx = model_name.find("patch")
    patch_size = int(model_name[start_idx + len("patch") : start_idx + len("patch") + 2])
    vision_config = XCLIPVisionConfig(patch_size=patch_size, num_frames=num_frames)
    if "large" in model_name:
        text_config.hidden_size = 768
        text_config.intermediate_size = 3072
        text_config.num_attention_heads = 12
        vision_config.hidden_size = 1024
        vision_config.intermediate_size = 4096
        vision_config.num_attention_heads = 16
        vision_config.num_hidden_layers = 24
        vision_config.mit_hidden_size = 768
        vision_config.mit_intermediate_size = 3072
    if model_name == "xclip-large-patch14-16-frames":
        vision_config.image_size = 336
    config = XCLIPConfig.from_text_vision_configs(text_config, vision_config)
    if "large" in model_name:
        config.projection_dim = 768
    return config
def rename_key(name):
    # text encoder
    if name == "token_embedding.weight":
        name = name.replace("token_embedding.weight", "text_model.embeddings.token_embedding.weight")
    if name == "positional_embedding":
        name = name.replace("positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if name.startswith("transformer.resblocks"):
        name = name.replace("transformer.resblocks", "text_model.encoder.layers")
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace("attn.out_proj", "self_attn.out_proj")
    if "ln_final" in name:
        name = name.replace("ln_final", "text_model.final_layer_norm")
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace("visual.class_embedding", "vision_model.embeddings.class_embedding")
    if name == "visual.positional_embedding":
        name = name.replace("visual.positional_embedding", "vision_model.embeddings.position_embedding.weight")
    if name.startswith("visual.transformer.resblocks"):
        name = name.replace("visual.transformer.resblocks", "vision_model.encoder.layers")
    if "visual.conv1" in name:
        name = name.replace("visual.conv1", "vision_model.embeddings.patch_embedding")
    if "visual.ln_pre" in name:
        name = name.replace("visual.ln_pre", "vision_model.pre_layernorm")
    if "visual.ln_post" in name:
        name = name.replace("visual.ln_post", "vision_model.post_layernorm")
    if "visual.proj" in name:
        name = name.replace("visual.proj", "visual_projection.weight")
    if "text_projection" in name:
        name = name.replace("text_projection", "text_projection.weight")
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace("prompts_visual_proj", "prompts_visual_projection")
    if "prompts_visual_ln" in name:
        name = name.replace("prompts_visual_ln", "prompts_visual_layernorm")
    # mit
    if name == "mit.positional_embedding":
        name = name.replace("positional", "position")
    if name.startswith("mit.resblocks"):
        name = name.replace("mit.resblocks", "mit.encoder.layers")
    # prompts generator
    if name.startswith("prompts_generator.norm"):
        name = name.replace("prompts_generator.norm", "prompts_generator.layernorm")
    return name
def lowerCamelCase__ ( __lowerCAmelCase : int , __lowerCAmelCase : str ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
lowerCAmelCase_ = orig_state_dict.pop(__lowerCAmelCase )
if "attn.in_proj" in key:
lowerCAmelCase_ = key.split("." )
if key.startswith("visual" ):
lowerCAmelCase_ = key_split[3]
lowerCAmelCase_ = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
lowerCAmelCase_ = val[
:dim, :
]
lowerCAmelCase_ = val[
dim : dim * 2, :
]
lowerCAmelCase_ = val[
-dim:, :
]
else:
lowerCAmelCase_ = val[
:dim
]
lowerCAmelCase_ = val[
dim : dim * 2
]
lowerCAmelCase_ = val[
-dim:
]
else:
if "weight" in key:
lowerCAmelCase_ = val[
:dim, :
]
lowerCAmelCase_ = val[
dim : dim * 2, :
]
lowerCAmelCase_ = val[
-dim:, :
]
else:
lowerCAmelCase_ = val[:dim]
lowerCAmelCase_ = val[
dim : dim * 2
]
lowerCAmelCase_ = val[-dim:]
elif key.startswith("mit" ):
lowerCAmelCase_ = key_split[2]
lowerCAmelCase_ = config.vision_config.mit_hidden_size
if "weight" in key:
lowerCAmelCase_ = val[:dim, :]
lowerCAmelCase_ = val[dim : dim * 2, :]
lowerCAmelCase_ = val[-dim:, :]
else:
lowerCAmelCase_ = val[:dim]
lowerCAmelCase_ = val[dim : dim * 2]
lowerCAmelCase_ = val[-dim:]
else:
lowerCAmelCase_ = key_split[2]
lowerCAmelCase_ = config.text_config.hidden_size
if "weight" in key:
lowerCAmelCase_ = val[:dim, :]
lowerCAmelCase_ = val[
dim : dim * 2, :
]
lowerCAmelCase_ = val[-dim:, :]
else:
lowerCAmelCase_ = val[:dim]
lowerCAmelCase_ = val[
dim : dim * 2
]
lowerCAmelCase_ = val[-dim:]
else:
lowerCAmelCase_ = rename_key(__lowerCAmelCase )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
lowerCAmelCase_ = val.T
lowerCAmelCase_ = val
return orig_state_dict
def prepare_video(num_frames):
    if num_frames == 8:
        filename = "eating_spaghetti_8_frames.npy"
    elif num_frames == 16:
        filename = "eating_spaghetti.npy"
    elif num_frames == 32:
        filename = "eating_spaghetti_32_frames.npy"
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename=filename, repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
def convert_xclip_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    model_to_url = {
# fully supervised kinetics-400 checkpoints
"xclip-base-patch32": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth",
"xclip-base-patch32-16-frames": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth"
),
"xclip-base-patch16": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth",
"xclip-base-patch16-16-frames": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth"
),
"xclip-large-patch14": "https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb",
"xclip-large-patch14-16-frames": "https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f",
# fully supervised kinetics-600 checkpoints
"xclip-base-patch16-kinetics-600": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth"
),
"xclip-base-patch16-kinetics-600-16-frames": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth"
),
"xclip-large-patch14-kinetics-600": "https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be",
# few shot
"xclip-base-patch16-hmdb-2-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth"
),
"xclip-base-patch16-hmdb-4-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth"
),
"xclip-base-patch16-hmdb-8-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth"
),
"xclip-base-patch16-hmdb-16-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth"
),
"xclip-base-patch16-ucf-2-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth"
),
"xclip-base-patch16-ucf-4-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth"
),
"xclip-base-patch16-ucf-8-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth"
),
"xclip-base-patch16-ucf-16-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth"
),
# zero shot
"xclip-base-patch16-zero-shot": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth",
}
    checkpoint_url = model_to_url[model_name]
    num_frames = 8
    if "16-frames" in model_name:
        num_frames = 16
    elif "shot" in model_name:
        num_frames = 32
    config = get_xclip_config(model_name, num_frames)
    model = XCLIPModel(config)
    model.eval()
    if "drive" in checkpoint_url:
        output = "pytorch_model.bin"
        gdown.cached_download(checkpoint_url, output, quiet=False)
        state_dict = torch.load(output, map_location="cpu")["model"]
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)["model"]
    state_dict = convert_state_dict(state_dict, config)
    model = XCLIPModel(config)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
    model.eval()
    size = 336 if model_name == "xclip-large-patch14-16-frames" else 224
    image_processor = VideoMAEImageProcessor(size=size)
    slow_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
    fast_tokenizer = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32")
    processor = XCLIPProcessor(image_processor=image_processor, tokenizer=fast_tokenizer)
    video = prepare_video(num_frames)
    inputs = processor(
        text=["playing sports", "eating spaghetti", "go shopping"], videos=video, return_tensors="pt", padding=True
    )
    print("Shape of pixel values:", inputs.pixel_values.shape)
    with torch.no_grad():
        outputs = model(**inputs)
    # Verify outputs
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1)
    print("Probs:", probs)
    # kinetics-400
    if model_name == "xclip-base-patch32":
        expected_probs = torch.tensor([[0.0019, 0.9951, 0.0030]])
    elif model_name == "xclip-base-patch32-16-frames":
        expected_probs = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]])
    elif model_name == "xclip-base-patch16":
        expected_probs = torch.tensor([[0.0083, 0.9681, 0.0236]])
    elif model_name == "xclip-base-patch16-16-frames":
        expected_probs = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]])
    elif model_name == "xclip-large-patch14":
        expected_probs = torch.tensor([[0.0062, 0.9864, 0.0075]])
    elif model_name == "xclip-large-patch14-16-frames":
        expected_probs = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]])
    # kinetics-600
    elif model_name == "xclip-base-patch16-kinetics-600":
        expected_probs = torch.tensor([[0.0555, 0.8914, 0.0531]])
    elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
        expected_probs = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]])
    elif model_name == "xclip-large-patch14-kinetics-600":
        expected_probs = torch.tensor([[0.0036, 0.9920, 0.0045]])
    # few shot
    elif model_name == "xclip-base-patch16-hmdb-2-shot":
        expected_probs = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]])
    elif model_name == "xclip-base-patch16-hmdb-4-shot":
        expected_probs = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]])
    elif model_name == "xclip-base-patch16-hmdb-8-shot":
        expected_probs = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]])
    elif model_name == "xclip-base-patch16-hmdb-16-shot":
        expected_probs = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]])
    elif model_name == "xclip-base-patch16-ucf-2-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-4-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-8-shot":
        expected_probs = torch.tensor([[0.0027, 0.9904, 0.0070]])
    elif model_name == "xclip-base-patch16-ucf-16-shot":
        expected_probs = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]])
    # zero shot
    elif model_name == "xclip-base-patch16-zero-shot":
        expected_probs = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]])
    else:
        raise ValueError(f"Model name {model_name} not supported")
    assert torch.allclose(probs, expected_probs, atol=1e-3)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print("Pushing model, processor and slow tokenizer files to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
        processor.push_to_hub(model_name, organization="nielsr")
        slow_tokenizer.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="xclip-base-patch32",
type=str,
help="Name of the model.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
args = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
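# Example invocation (a sketch; the script filename and output path are illustrative):
#
#   python convert_x_clip_original_pytorch_to_hf.py \
#       --model_name xclip-base-patch32 \
#       --pytorch_dump_folder_path ./xclip-base-patch32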
| 231
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""deit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""deit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""deit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""deit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""deit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""deit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""deit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""deit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""deit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""deit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "deit.embeddings.cls_token"),
("dist_token", "deit.embeddings.distillation_token"),
("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "deit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit" ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("norm.weight", "deit.layernorm.weight"),
("norm.bias", "deit.layernorm.bias"),
("head.weight", "cls_classifier.weight"),
("head.bias", "cls_classifier.bias"),
("head_dist.weight", "distillation_classifier.weight"),
("head_dist.bias", "distillation_classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--deit_name",
default="vit_deit_base_distilled_patch16_224",
type=str,
help="Name of the DeiT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
args = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
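# Example invocation (a sketch; the script filename and output path are illustrative):
#
#   python convert_deit_timm_to_pytorch.py \
#       --deit_name vit_deit_base_distilled_patch16_224 \
#       --pytorch_dump_folder_path ./deit-base-distilled-patch16-224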
| 231
| 1
|
from __future__ import annotations
class IIRFilter:
    def __init__(self, order: int):
        self.order = order
        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order
        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]
        if len(a_coeffs) != self.order + 1:
            error_msg = (
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}"
            )
            raise ValueError(error_msg)
        if len(b_coeffs) != self.order + 1:
            error_msg = (
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}"
            )
            raise ValueError(error_msg)
        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample: float) -> float:
        result = 0.0
        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )
        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
        # Shift the sample histories by one position.
        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]
        self.input_history[0] = sample
        self.output_history[0] = result
        return result
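# Usage sketch (assumes the class above): with the default coefficients
# (a = b = [1.0, 0.0, ...]) the filter is an identity and passes samples through.
if __name__ == "__main__":
    filt = IIRFilter(2)
    print([filt.process(sample) for sample in (0.0, 1.0, 0.5)])  # [0.0, 1.0, 0.5]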
| 354
|
__author__ = "Tobias Carryer"
from time import time
class LinearCongruentialGenerator:
    """A pseudorandom number generator based on the linear congruential method."""

    def __init__(self, multiplier, increment, modulo, seed=int(time())):  # noqa: B008
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = seed

    def next_number(self):
        """Generate and return the next pseudorandom number in the sequence."""
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed
if __name__ == "__main__":
# Show the LCG in action.
    lcg = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31)
while True:
print(lcg.next_number())
| 286
| 0
|
from collections import defaultdict
from math import gcd
def solution(limit: int = 1_500_000) -> int:
    frequencies: defaultdict = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)
if __name__ == "__main__":
print(F'''{solution() = }''')
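# Sanity-check sketch: Euclid's formula with m=2, n=1 yields the (3, 4, 5) triangle,
# whose perimeter 12 equals 2*m*(m + n), the primitive perimeter counted above.
if __name__ == "__main__":
    m, n = 2, 1
    sides = (m * m - n * n, 2 * m * n, m * m + n * n)  # (3, 4, 5)
    assert sum(sides) == 2 * m * (m + n) == 12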
| 214
|
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
logger = logging.get_logger(__name__)
class DPTFeatureExtractor(DPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use DPTImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 214
| 1
|
'''simple docstring'''
def gnome_sort(lst):
    if len(lst) <= 1:
        return lst
    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(gnome_sort(unsorted))
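# Non-interactive usage sketch (doctest style), assuming gnome_sort as defined above:
# >>> gnome_sort([3, 1, 2])
# [1, 2, 3]
# >>> gnome_sort([])
# []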
| 360
|
'''simple docstring'''
# Lint as: python3
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")
_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")
_split_re = r"^\w+(\.\w+)*$"
INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"
def camelcase_to_snakecase(name):
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()
def snakecase_to_camelcase(name):
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != '')
def filename_prefix_for_name(name):
    if os.path.basename(name) != name:
        raise ValueError(F"""Should be a dataset name, not a path: {name}""")
    return camelcase_to_snakecase(name)
def filename_prefix_for_split(name, split):
    if os.path.basename(name) != name:
        raise ValueError(F"""Should be a dataset name, not a path: {name}""")
    if not re.match(_split_re, split):
        raise ValueError(F"""Split name should match '{_split_re}'' but got '{split}'.""")
    return F"""{filename_prefix_for_name(name)}-{split}"""
def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += F""".{filetype_suffix}"""
    filepath = os.path.join(data_dir, prefix)
    return F"""{filepath}*"""
def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)
    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [F"""{prefix}-{shard_id:05d}-of-{num_shards:05d}""" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + F""".{filetype_suffix}""" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += F""".{filetype_suffix}"""
        return [filename]
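# Usage sketch for the naming helpers above (dataset/split names are illustrative):
if __name__ == "__main__":
    print(camelcase_to_snakecase("SomeDatasetName"))               # some_dataset_name
    print(filename_prefix_for_split("SomeDatasetName", "train"))   # some_dataset_name-train
    print(filenames_for_dataset_split("/data", "SomeDatasetName", "train", "arrow", [10, 10]))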
| 280
| 0
|
from collections.abc import Callable
class Heap:
    """A generic heap; item ordering is derived from the scores computed by `key`."""

    def __init__(self, key=None):
        # Stores actual heap items.
        self.arr = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key = key or (lambda x: x)

    def _parent(self, i):
        return int((i - 1) / 2) if i > 0 else None

    def _left(self, i):
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None

    def _right(self, i):
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None

    def _swap(self, i, j):
        # First update the indexes of the items in the index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]

    def _cmp(self, i, j):
        return self.arr[i][1] < self.arr[j][1]

    def _get_valid_parent(self, i):
        left = self._left(i)
        right = self._right(i)
        valid_parent = i
        if left is not None and not self._cmp(left, valid_parent):
            valid_parent = left
        if right is not None and not self._cmp(right, valid_parent):
            valid_parent = right
        return valid_parent

    def _heapify_up(self, index):
        parent = self._parent(index)
        while parent is not None and not self._cmp(index, parent):
            self._swap(index, parent)
            index, parent = parent, self._parent(parent)

    def _heapify_down(self, index):
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index, valid_parent)
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)

    def update_item(self, item, item_value):
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)

    def delete_item(self, item):
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change- so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)

    def insert_item(self, item, item_value):
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)

    def get_top(self):
        return self.arr[0] if self.size else None

    def extract_top(self):
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple


def test_heap() -> None:
    """Placeholder for heap doctests, picked up by doctest.testmod() below."""
if __name__ == "__main__":
import doctest
doctest.testmod()
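# Usage sketch (assumes the Heap above): items are max-heap ordered by value;
# pass key=lambda x: -x for min-heap ordering.
if __name__ == "__main__":
    heap = Heap()
    for item, value in [("a", 3), ("b", 10), ("c", 7)]:
        heap.insert_item(item, value)
    print(heap.get_top())      # ['b', 10]
    print(heap.extract_top())  # ['b', 10]
    print(heap.get_top())      # ['c', 7]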
| 281
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/table-transformer-detection""": (
"""https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"""
),
}
class TableTransformerConfig(PretrainedConfig):
    """Configuration class for the Table Transformer model."""

    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
def __init__( self : Optional[int] ,lowerCamelCase__ : Optional[Any]=True ,lowerCamelCase__ : Optional[Any]=None ,lowerCamelCase__ : List[Any]=3 ,lowerCamelCase__ : Optional[int]=100 ,lowerCamelCase__ : List[Any]=6 ,lowerCamelCase__ : Dict=2048 ,lowerCamelCase__ : List[Any]=8 ,lowerCamelCase__ : Dict=6 ,lowerCamelCase__ : Dict=2048 ,lowerCamelCase__ : Any=8 ,lowerCamelCase__ : Optional[int]=0.0 ,lowerCamelCase__ : int=0.0 ,lowerCamelCase__ : List[Any]=True ,lowerCamelCase__ : Optional[int]="relu" ,lowerCamelCase__ : Tuple=256 ,lowerCamelCase__ : Any=0.1 ,lowerCamelCase__ : Optional[Any]=0.0 ,lowerCamelCase__ : Tuple=0.0 ,lowerCamelCase__ : List[Any]=0.02 ,lowerCamelCase__ : int=1.0 ,lowerCamelCase__ : List[str]=False ,lowerCamelCase__ : Optional[Any]="sine" ,lowerCamelCase__ : List[str]="resnet50" ,lowerCamelCase__ : Optional[Any]=True ,lowerCamelCase__ : List[str]=False ,lowerCamelCase__ : int=1 ,lowerCamelCase__ : Dict=5 ,lowerCamelCase__ : Tuple=2 ,lowerCamelCase__ : Union[str, Any]=1 ,lowerCamelCase__ : str=1 ,lowerCamelCase__ : Any=5 ,lowerCamelCase__ : Tuple=2 ,lowerCamelCase__ : str=0.1 ,**lowerCamelCase__ : List[str] ,) -> Optional[int]:
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
SCREAMING_SNAKE_CASE = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(lowerCamelCase__ ,lowerCamelCase__ ):
SCREAMING_SNAKE_CASE = backbone_config.get("""model_type""" )
SCREAMING_SNAKE_CASE = CONFIG_MAPPING[backbone_model_type]
SCREAMING_SNAKE_CASE = config_class.from_dict(lowerCamelCase__ )
# set timm attributes to None
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = None, None, None
SCREAMING_SNAKE_CASE = use_timm_backbone
SCREAMING_SNAKE_CASE = backbone_config
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = num_queries
SCREAMING_SNAKE_CASE = d_model
SCREAMING_SNAKE_CASE = encoder_ffn_dim
SCREAMING_SNAKE_CASE = encoder_layers
SCREAMING_SNAKE_CASE = encoder_attention_heads
SCREAMING_SNAKE_CASE = decoder_ffn_dim
SCREAMING_SNAKE_CASE = decoder_layers
SCREAMING_SNAKE_CASE = decoder_attention_heads
SCREAMING_SNAKE_CASE = dropout
SCREAMING_SNAKE_CASE = attention_dropout
SCREAMING_SNAKE_CASE = activation_dropout
SCREAMING_SNAKE_CASE = activation_function
SCREAMING_SNAKE_CASE = init_std
SCREAMING_SNAKE_CASE = init_xavier_std
SCREAMING_SNAKE_CASE = encoder_layerdrop
SCREAMING_SNAKE_CASE = decoder_layerdrop
SCREAMING_SNAKE_CASE = encoder_layers
SCREAMING_SNAKE_CASE = auxiliary_loss
SCREAMING_SNAKE_CASE = position_embedding_type
SCREAMING_SNAKE_CASE = backbone
SCREAMING_SNAKE_CASE = use_pretrained_backbone
SCREAMING_SNAKE_CASE = dilation
# Hungarian matcher
SCREAMING_SNAKE_CASE = class_cost
SCREAMING_SNAKE_CASE = bbox_cost
SCREAMING_SNAKE_CASE = giou_cost
# Loss coefficients
SCREAMING_SNAKE_CASE = mask_loss_coefficient
SCREAMING_SNAKE_CASE = dice_loss_coefficient
SCREAMING_SNAKE_CASE = bbox_loss_coefficient
SCREAMING_SNAKE_CASE = giou_loss_coefficient
SCREAMING_SNAKE_CASE = eos_coefficient
super().__init__(is_encoder_decoder=lowerCamelCase__ ,**lowerCamelCase__ )
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
class TableTransformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
| 296
| 0
|
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Pix2StructProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "Pix2StructImageProcessor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
def __call__(self, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = True, SCREAMING_SNAKE_CASE_ = False, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = 2048, SCREAMING_SNAKE_CASE_ = 0, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = False, SCREAMING_SNAKE_CASE_ = False, SCREAMING_SNAKE_CASE_ = False, SCREAMING_SNAKE_CASE_ = False, SCREAMING_SNAKE_CASE_ = False, SCREAMING_SNAKE_CASE_ = True, SCREAMING_SNAKE_CASE_ = None, **SCREAMING_SNAKE_CASE_, ) -> BatchEncoding:
if images is None and text is None:
raise ValueError("""You have to specify either images or text.""" )
# Get only text
if images is None and not self.image_processor.is_vqa:
UpperCAmelCase_: Optional[Any] = self.tokenizer
UpperCAmelCase_: Optional[Any] = self.tokenizer(
text=SCREAMING_SNAKE_CASE_, add_special_tokens=SCREAMING_SNAKE_CASE_, padding=SCREAMING_SNAKE_CASE_, truncation=SCREAMING_SNAKE_CASE_, max_length=SCREAMING_SNAKE_CASE_, stride=SCREAMING_SNAKE_CASE_, pad_to_multiple_of=SCREAMING_SNAKE_CASE_, return_attention_mask=SCREAMING_SNAKE_CASE_, return_overflowing_tokens=SCREAMING_SNAKE_CASE_, return_special_tokens_mask=SCREAMING_SNAKE_CASE_, return_offsets_mapping=SCREAMING_SNAKE_CASE_, return_token_type_ids=SCREAMING_SNAKE_CASE_, return_length=SCREAMING_SNAKE_CASE_, verbose=SCREAMING_SNAKE_CASE_, return_tensors=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_, )
return text_encoding
if not self.image_processor.is_vqa:
# add pixel_values
UpperCAmelCase_: Union[str, Any] = self.image_processor(
SCREAMING_SNAKE_CASE_, return_tensors=SCREAMING_SNAKE_CASE_, max_patches=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ )
else:
# add pixel_values and bbox
UpperCAmelCase_: Any = self.image_processor(
SCREAMING_SNAKE_CASE_, return_tensors=SCREAMING_SNAKE_CASE_, max_patches=SCREAMING_SNAKE_CASE_, header_text=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ )
if text is not None and not self.image_processor.is_vqa:
UpperCAmelCase_: int = self.tokenizer(
text=SCREAMING_SNAKE_CASE_, add_special_tokens=SCREAMING_SNAKE_CASE_, padding=SCREAMING_SNAKE_CASE_, truncation=SCREAMING_SNAKE_CASE_, max_length=SCREAMING_SNAKE_CASE_, stride=SCREAMING_SNAKE_CASE_, pad_to_multiple_of=SCREAMING_SNAKE_CASE_, return_attention_mask=SCREAMING_SNAKE_CASE_, return_overflowing_tokens=SCREAMING_SNAKE_CASE_, return_special_tokens_mask=SCREAMING_SNAKE_CASE_, return_offsets_mapping=SCREAMING_SNAKE_CASE_, return_token_type_ids=SCREAMING_SNAKE_CASE_, return_length=SCREAMING_SNAKE_CASE_, verbose=SCREAMING_SNAKE_CASE_, return_tensors=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_, )
if "attention_mask" in text_encoding:
UpperCAmelCase_: Optional[Any] = text_encoding.pop("""attention_mask""" )
if "input_ids" in text_encoding:
UpperCAmelCase_: List[str] = text_encoding.pop("""input_ids""" )
else:
UpperCAmelCase_: Tuple = None
if text_encoding is not None:
encoding_image_processor.update(SCREAMING_SNAKE_CASE_ )
return encoding_image_processor
    def batch_decode(self, *args, **kwargs):
        # Forward to the tokenizer's batch_decode.
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        # Forward to the tokenizer's decode.
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
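# Usage sketch (comment-only; the checkpoint name is illustrative and requires a
# downloaded model plus a PIL image):
#
#   processor = Pix2StructProcessor.from_pretrained("google/pix2struct-textcaps-base")
#   inputs = processor(images=image, text="A picture of", return_tensors="pt")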
| 82
|
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
'text_branch': 'text_model',
'audio_branch': 'audio_model.audio_encoder',
'attn': 'attention.self',
'self.proj': 'output.dense',
'attention.self_mask': 'attn_mask',
'mlp.fc1': 'intermediate.dense',
'mlp.fc2': 'output.dense',
'norm1': 'layernorm_before',
'norm2': 'layernorm_after',
'bn0': 'batch_norm',
}
processor = AutoFeatureExtractor.from_pretrained('laion/clap-htsat-unfused', truncation='rand_trunc')
def init_clap(checkpoint_path, enable_fusion=False):
    model, model_cfg = create_model(
        "HTSAT-tiny",
        "roberta",
        checkpoint_path,
        precision="fp32",
        device="cuda:0" if torch.cuda.is_available() else "cpu",
        enable_fusion=enable_fusion,
        fusion_type="aff_2d" if enable_fusion else None,
    )
    return model, model_cfg
def rename_state_dict(state_dict):
    model_state_dict = {}
    sequential_layers_pattern = r""".*sequential.(\d+).*"""
    text_projection_pattern = r""".*_projection.(\d+).*"""
    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)
        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)
            key = key.replace(f'sequential.{sequential_layer}.', f'layers.{int(sequential_layer)//3}.linear.')
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(f'_projection.{projecton_layer}.', f'_projection.linear{transformers_projection_layer}.')
        if "audio" in key and "qkv" in key:
            # split qkv into query key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3
            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]
            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value
    return model_state_dict
def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    clap_model, model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)
    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)
    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)
    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)
    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument('--enable_fusion', action='store_true', help='Whether to enable fusion or not')
    args = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
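# Example invocation (a sketch; the script filename and paths are illustrative):
#
#   python convert_clap_original_pytorch_to_hf.py \
#       --checkpoint_path ./HTSAT-tiny-roberta.pt \
#       --pytorch_dump_folder_path ./clap-htsat-tiny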
| 82
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_clap": [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapAudioConfig",
"ClapConfig",
"ClapTextConfig",
],
"processing_clap": ["ClapProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clap"] = [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapModel",
"ClapPreTrainedModel",
"ClapTextModel",
"ClapTextModelWithProjection",
"ClapAudioModel",
"ClapAudioModelWithProjection",
]
    _import_structure["feature_extraction_clap"] = ["ClapFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 169
|
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
logger = get_logger(__name__)
LOGITS_PROCESSOR_INPUTS_DOCSTRING = r"\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n search or log softmax for each vocabulary token when using beam search\n kwargs (`Dict[str, Any]`, *optional*):\n Additional logits processor specific kwargs.\n\n Return:\n `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n"
class FlaxLogitsProcessor:
    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        raise NotImplementedError(
            f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.'''
        )
class FlaxLogitsWarper:
    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        raise NotImplementedError(
            f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.'''
        )
class FlaxLogitsProcessorList(list):
    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int, **kwargs) -> jnp.ndarray:
        for processor in self:
            function_args = inspect.signature(processor.__call__).parameters
            if len(function_args) > 3:
                if not all(arg in kwargs for arg in list(function_args.keys())[2:]):
                    raise ValueError(
                        f'''Make sure that all the required parameters: {list(function_args.keys())} for '''
                        f'''{processor.__class__} are passed to the logits processor.'''
                    )
                scores = processor(input_ids, scores, cur_len, **kwargs)
            else:
                scores = processor(input_ids, scores, cur_len)
        return scores
class FlaxTemperatureLogitsWarper(FlaxLogitsWarper):
    def __init__(self, temperature: float):
        if not isinstance(temperature, float) or not (temperature > 0):
            raise ValueError(f'''`temperature` has to be a strictly positive float, but is {temperature}''')
        self.temperature = temperature

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores / self.temperature
        return scores
class FlaxTopPLogitsWarper(FlaxLogitsWarper):
    def __init__(self, top_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0):
            raise ValueError(f'''`top_p` has to be a float > 0 and < 1, but is {top_p}''')
        if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1):
            raise ValueError(f'''`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}''')
        self.top_p = top_p
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        topk_scores, topk_indices = lax.top_k(scores, scores.shape[-1])
        mask_scores = jnp.full_like(scores, self.filter_value)
        cumulative_probs = jax.nn.softmax(topk_scores, axis=-1).cumsum(axis=-1)
        score_mask = cumulative_probs < self.top_p
        # include the token that is higher than top_p as well
        score_mask = jnp.roll(score_mask, 1)
        score_mask |= score_mask.at[:, 0].set(True)
        # min tokens to keep
        score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True)
        topk_next_scores = jnp.where(score_mask, topk_scores, mask_scores)
        next_scores = jax.lax.sort_key_val(topk_indices, topk_next_scores)[-1]
        return next_scores
class FlaxTopKLogitsWarper(FlaxLogitsWarper):
    def __init__(self, top_k: int, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_k, int) or top_k <= 0:
            raise ValueError(f'''`top_k` has to be a strictly positive integer, but is {top_k}''')
        self.top_k = max(top_k, min_tokens_to_keep)
        self.filter_value = filter_value

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        batch_size, vocab_size = scores.shape
        next_scores_flat = jnp.full(batch_size * vocab_size, self.filter_value)
        topk = min(self.top_k, scores.shape[-1])  # Safety check
        topk_scores, topk_indices = lax.top_k(scores, topk)
        shift = jnp.broadcast_to((jnp.arange(batch_size) * vocab_size)[:, None], (batch_size, topk)).flatten()
        topk_scores_flat = topk_scores.flatten()
        topk_indices_flat = topk_indices.flatten() + shift
        next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat)
        next_scores = next_scores_flat.reshape(batch_size, vocab_size)
        return next_scores
class FlaxForcedBOSTokenLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, bos_token_id: int):
        self.bos_token_id = bos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))
        apply_penalty = 1 - jnp.bool_(cur_len - 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.bos_token_id].set(0), scores)
        return scores
class FlaxForcedEOSTokenLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, max_length: int, eos_token_id: int):
        self.max_length = max_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))
        apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.eos_token_id].set(0), scores)
        return scores
class FlaxMinLengthLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, min_length: int, eos_token_id: int):
        if not isinstance(min_length, int) or min_length < 0:
            raise ValueError(f'''`min_length` has to be a positive integer, but is {min_length}''')
        if not isinstance(eos_token_id, int) or eos_token_id < 0:
            raise ValueError(f'''`eos_token_id` has to be a positive integer, but is {eos_token_id}''')
        self.min_length = min_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        # create boolean flag to decide if min length penalty should be applied
        apply_penalty = 1 - jnp.clip(cur_len - self.min_length, 0, 1)
        scores = jnp.where(apply_penalty, scores.at[:, self.eos_token_id].set(-float("inf")), scores)
        return scores
class FlaxSuppressTokensAtBeginLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, begin_suppress_tokens, begin_index):
        self.begin_suppress_tokens = list(begin_suppress_tokens)
        self.begin_index = begin_index

    def __call__(self, input_ids, scores, cur_len: int):
        apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index)
        scores = jnp.where(apply_penalty, scores.at[:, self.begin_suppress_tokens].set(-float("inf")), scores)
        return scores
class FlaxSuppressTokensLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, suppress_tokens: list):
        self.suppress_tokens = list(suppress_tokens)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores.at[..., self.suppress_tokens].set(-float("inf"))
        return scores
class FlaxForceTokensLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, force_token_map):
        force_token_map = dict(force_token_map)
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indexes without forced tokens will have a negative value.
        force_token_array = jnp.ones((max(force_token_map.keys()) + 1), dtype=jnp.int32) * -1
        for index, token in force_token_map.items():
            if token is not None:
                force_token_array = force_token_array.at[index].set(token)
        self.force_token_array = jnp.int32(force_token_array)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        def _force_token(generation_idx):
            batch_size = scores.shape[0]
            current_token = self.force_token_array[generation_idx]
            new_scores = jnp.ones_like(scores, dtype=scores.dtype) * -float("inf")
            updates = jnp.zeros((batch_size, 1), dtype=scores.dtype)
            new_scores = lax.dynamic_update_slice(new_scores, updates, (0, current_token))
            return new_scores

        scores = lax.cond(
            cur_len >= self.force_token_array.shape[0],
            # If the current length is geq than the length of force_token_array, the processor does nothing.
            lambda: scores,
            # Otherwise, it may force a certain token.
            lambda: lax.cond(
                self.force_token_array[cur_len] >= 0,
                # Only valid (positive) tokens are forced.
                lambda: _force_token(cur_len),
                # Otherwise, the processor does nothing.
                lambda: scores,
            ),
        )
        return scores
class FlaxWhisperTimeStampLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, generate_config, model_config, decoder_input_length):
        self.eos_token_id = generate_config.eos_token_id
        self.no_timestamps_token_id = generate_config.no_timestamps_token_id
        self.timestamp_begin = generate_config.no_timestamps_token_id + 1
        self.begin_index = decoder_input_length + 1

        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(generate_config, "max_initial_timestamp_index"):
            self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index
        else:
            self.max_initial_timestamp_index = model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            self.max_initial_timestamp_index = model_config.vocab_size

    def __call__(self, input_ids, scores, cur_len):
        # suppress <|notimestamps|> which is handled by without_timestamps
        scores = scores.at[:, self.no_timestamps_token_id].set(-float("inf"))

        def handle_pairs(input_ids_k, scores_k):
            last_was_timestamp = jnp.where((cur_len - self.begin_index) >= 1, True, False)
            last_was_timestamp = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin,
                True and last_was_timestamp,
                False,
            )

            penultimate_was_timestamp = jnp.where((cur_len - self.begin_index) < 2, True, False)
            penultimate_was_timestamp = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin,
                True,
                penultimate_was_timestamp,
            )

            return jnp.where(
                last_was_timestamp,
                jnp.where(
                    penultimate_was_timestamp > 0,
                    scores_k.at[self.timestamp_begin :].set(-float("inf")),  # must be non-timestamp
                    scores_k.at[: self.eos_token_id].set(-float("inf")),  # cannot be a normal text token
                ),
                scores_k,
            )

        scores = jax.vmap(handle_pairs)(input_ids, scores)

        apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index, True, False)
        apply_max_initial_timestamp = jnp.where(
            self.max_initial_timestamp_index is not None,
            True and apply_max_initial_timestamp,
            False,
        )

        last_allowed = self.timestamp_begin + self.max_initial_timestamp_index
        scores = jnp.where(
            apply_max_initial_timestamp,
            scores.at[:, last_allowed + 1 :].set(-float("inf")),
            scores,
        )

        # if sum of probability over timestamps is above any other token, sample timestamp
        logprobs = jax.nn.log_softmax(scores, axis=-1)

        def handle_cumulative_probs(logprobs_k, scores_k):
            timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :], axis=-1)
            max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin])
            return jnp.where(
                timestamp_logprob > max_text_token_logprob,
                scores_k.at[: self.timestamp_begin].set(-float("inf")),
                scores_k,
            )

        scores = jax.vmap(handle_cumulative_probs)(logprobs, scores)
        return scores
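
# --- Added usage sketch (not part of the original module; assumes the jax /
# jax.numpy imports at the top of this file) ---------------------------------
if __name__ == "__main__":
    processor = FlaxMinLengthLogitsProcessor(min_length=5, eos_token_id=2)
    scores = jnp.zeros((1, 10))  # batch of 1, toy vocabulary of 10 tokens
    print(processor(None, scores, cur_len=3)[0, 2])  # -inf: EOS masked before min_length
    print(processor(None, scores, cur_len=7)[0, 2])  # 0.0: scores untouched afterwards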
'''simple docstring'''
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_2 + [1]
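
# How the toy vocab above tokenizes "lower" (a walk-through of the BPE merges):
#   l o w e r</w>  ->  lo w e r</w>   (merge "l o",     rank 123)
#   lo w e r</w>   ->  low e r</w>    (merge "lo w",    rank 1456)
#   low e r</w>    ->  low er</w>     (merge "e r</w>", rank 1789)
# which is exactly the ["low", "er</w>"] -> ids [14, 15] checked in test_full_tokenizer.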
'''simple docstring'''
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
BITS = 8


# convert to bit representations and back, adapted from
# https://github.com/lucidrains/bit-diffusion/blob/main/bit_diffusion/bit_diffusion.py
def decimal_to_bits(x, bits=BITS):
    """expects image tensor ranging from 0 to 1, outputs bit tensor ranging from -1 to 1"""
    device = x.device

    x = (x * 255).int().clamp(0, 255)

    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b c h w -> b c 1 h w")

    bits = ((x & mask) != 0).float()
    bits = rearrange(bits, "b c d h w -> b (c d) h w")
    bits = bits * 2 - 1
    return bits


def bits_to_decimal(x, bits=BITS):
    """expects bits from -1 to 1, outputs image tensor from 0 to 1"""
    device = x.device

    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)

    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b (c d) h w -> b c d h w", d=8)
    dec = reduce(x * mask, "b c d h w -> b c h w", "sum")
    return (dec / 255).clamp(0.0, 1.0)
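
# --- Added round-trip sanity check (a sketch, not part of the original pipeline) ---
if __name__ == "__main__":
    x = torch.rand(2, 3, 4, 4)
    # the round trip is exact up to the 8-bit quantization performed by (x * 255).int()
    assert torch.allclose(bits_to_decimal(decimal_to_bits(x)), (x * 255).int().float() / 255, atol=1e-6)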
def ddim_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    eta: float = 0.0,
    use_clipped_model_output: bool = True,
    generator=None,
    return_dict: bool = True,
) -> Union[DDIMSchedulerOutput, Tuple]:
    if self.num_inference_steps is None:
        raise ValueError(
            "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
        )

    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read DDIM paper in-detail understanding
    # Notation (<variable name> -> <name in paper>
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"

    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps

    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
    beta_prod_t = 1 - alpha_prod_t

    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5

    # 4. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep, prev_timestep)
    std_dev_t = eta * variance ** 0.5

    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5

    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output

    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction

    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output) else "cpu"
        noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device)
        variance = self._get_variance(timestep, prev_timestep) ** 0.5 * eta * noise
        prev_sample = prev_sample + variance

    if not return_dict:
        return (prev_sample,)

    return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
def ddpm_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    prediction_type="epsilon",
    generator=None,
    return_dict: bool = True,
) -> Union[DDPMSchedulerOutput, Tuple]:
    t = timestep

    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
    else:
        predicted_variance = None

    # 1. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[t]
    alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev

    # 2. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    elif prediction_type == "sample":
        pred_original_sample = model_output
    else:
        raise ValueError(f"Unsupported prediction_type {prediction_type}.")

    # 3. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
    current_sample_coeff = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

    # 5. Compute predicted previous sample µ_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

    # 6. Add noise
    variance = 0
    if t > 0:
        noise = torch.randn(
            model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator
        ).to(model_output.device)
        variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise

    pred_prev_sample = pred_prev_sample + variance

    if not return_dict:
        return (pred_prev_sample,)

    return DDPMSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
class BitDiffusion(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        bit_scale: Optional[float] = 1.0,
    ):
        super().__init__()
        self.bit_scale = bit_scale
        # Monkey-patch the scheduler with the bit-scale aware step function.
        # (`__get__` binds the function to the scheduler instance, so `self`
        # inside the patched step refers to the scheduler.)
        scheduler.step = (
            ddim_bit_scheduler_step.__get__(scheduler)
            if isinstance(scheduler, DDIMScheduler)
            else ddpm_bit_scheduler_step.__get__(scheduler)
        )

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        height: Optional[int] = 256,
        width: Optional[int] = 256,
        num_inference_steps: Optional[int] = 50,
        generator: Optional[torch.Generator] = None,
        batch_size: Optional[int] = 1,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ):
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width),
            generator=generator,
        )
        latents = decimal_to_bits(latents) * self.bit_scale
        latents = latents.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # predict the noise residual
            noise_pred = self.unet(latents, t).sample

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents).prev_sample

        image = bits_to_decimal(latents)

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
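
# --- Hypothetical usage sketch (not part of the original pipeline) ----------
# The UNet must accept 3 * BITS = 24 input/output channels, since each RGB
# channel is expanded into 8 bit planes before diffusion:
#   pipe = BitDiffusion(unet=my_24_channel_unet, scheduler=DDIMScheduler(), bit_scale=1.0)
#   image = pipe(height=64, width=64, num_inference_steps=10).images[0]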
"""simple docstring"""
def one_pence() -> int:
    return 1


def two_pence(x: int) -> int:
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x: int) -> int:
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x: int) -> int:
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)


def twenty_pence(x: int) -> int:
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)


def fifty_pence(x: int) -> int:
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)


def one_pound(x: int) -> int:
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)


def two_pound(x: int) -> int:
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)


def solution(x: int = 200) -> int:
    return two_pound(x)
if __name__ == "__main__":
print(solution(int(input().strip())))
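
# --- Added worked example (not part of the original solution) ---------------
# five_pence(5) counts the ways to make 5p from {1p, 2p, 5p}; there are four:
# 5, 2+2+1, 2+1+1+1 and 1+1+1+1+1. For the full problem, solution(200) returns
# 73682, the published Project Euler 31 answer.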
"""simple docstring"""
def decimal_to_binary(num: int) -> str:
    """Convert an integer to its binary string, e.g. 5 -> "0b101", -10 -> "-0b1010"."""
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")

    if num == 0:
        return "0b0"

    negative = False

    if num < 0:
        negative = True
        num = -num

    binary: list[int] = []

    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1

    if negative:
        return "-0b" + "".join(str(e) for e in binary)

    return "0b" + "".join(str(e) for e in binary)
if __name__ == "__main__":
import doctest
doctest.testmod()
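
# --- Example outputs (added; uses the decimal_to_binary name restored above) --
# decimal_to_binary(0)   -> "0b0"
# decimal_to_binary(5)   -> "0b101"
# decimal_to_binary(-10) -> "-0b1010"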
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Dict = {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/config.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/config.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/config.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/config.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json""",
"""roberta-large-openai-detector""": """https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json""",
}
class RobertaConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "roberta"

    def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class RobertaOnnxConfig(OnnxConfig):
    """simple docstring"""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
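
# --- Added usage sketch (assumes a working transformers install) ------------
if __name__ == "__main__":
    config = RobertaConfig(num_hidden_layers=6)  # a hypothetical small variant
    onnx_config = RobertaOnnxConfig(config)
    print(onnx_config.inputs)  # OrderedDict with dynamic batch/sequence ONNX axes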
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = (
    subprocess.check_output(f"git diff --diff-filter=d --name-only {fork_point_sha}".split()).decode("utf-8").split()
)

joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
'''simple docstring'''
class DisjointSet:
    def __init__(self, set_counts: list) -> None:
        """Initialize with a list of the number of items in each set and rank = 1 for each set."""
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        """Merge two sets together using union by rank; returns True if the merge happened."""
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)

        if src_parent == dst_parent:
            return False

        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]

        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        """Find the root of a set, compressing the path along the way."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
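
# --- Added usage example (a sketch, not part of the original module) --------
if __name__ == "__main__":
    ds = DisjointSet([1, 1, 1])  # three singleton sets
    ds.merge(0, 1)
    ds.merge(1, 2)
    assert ds.get_parent(0) == ds.get_parent(2)  # all three items share one root
    assert ds.max_set == 3  # the largest set now holds all three items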
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('''0.8.3'''):
raise Exception('''requires gluonnlp == 0.8.3''')
if version.parse(mx.__version__) != version.parse('''1.5.0'''):
raise Exception('''requires mxnet == 1.5.0''')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = "The Nymphenburg Palace is a beautiful palace in Munich!"
def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path: str, pytorch_dump_folder_path: str):
    """Convert the original Bort checkpoint (GluonNLP) into a PyTorch transformers checkpoint."""
    bort_4_8_768_1024_hparams = {
'attention_cell': 'multi_head',
'num_layers': 4,
'units': 10_24,
'hidden_size': 7_68,
'max_length': 5_12,
'num_heads': 8,
'scaled': True,
'dropout': 0.1,
'use_residual': True,
'embed_size': 10_24,
'embed_dropout': 0.1,
'word_embed': None,
'layer_norm_eps': 1e-5,
'token_type_vocab_size': 2,
}
    predefined_args = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args["attention_cell"], num_layers=predefined_args["num_layers"],
        units=predefined_args["units"], hidden_size=predefined_args["hidden_size"],
        max_length=predefined_args["max_length"], num_heads=predefined_args["num_heads"],
        scaled=predefined_args["scaled"], dropout=predefined_args["dropout"],
        output_attention=False, output_all_encodings=False, use_residual=predefined_args["use_residual"],
        activation=predefined_args.get("activation", "gelu"), layer_norm_eps=predefined_args.get("layer_norm_eps", None),
    )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = "openwebtext_ccnews_stories_books_cased"
# Specify download folder to Gluonnlp's vocab
    gluon_cache_dir = os.path.join(get_home_dir(), "models")
    bort_vocab = _load_vocab(vocab_name, None, gluon_cache_dir, cls=Vocab)

    original_bort = nlp.model.BERTModel(
        encoder, len(bort_vocab),
        units=predefined_args["units"], embed_size=predefined_args["embed_size"],
        embed_dropout=predefined_args["embed_dropout"], word_embed=predefined_args["word_embed"],
        use_pooler=False, use_token_type_embed=False,
        token_type_vocab_size=predefined_args["token_type_vocab_size"],
        use_classifier=False, use_decoder=False,
    )

    original_bort.load_parameters(bort_checkpoint_path, cast_dtype=True, ignore_extra=True)
    params = original_bort._collect_params_with_prefix()
# Build our config 🤗
    hf_bort_config_json = {
'architectures': ['BertForMaskedLM'],
'attention_probs_dropout_prob': predefined_args['dropout'],
'hidden_act': 'gelu',
'hidden_dropout_prob': predefined_args['dropout'],
'hidden_size': predefined_args['embed_size'],
'initializer_range': 0.02,
'intermediate_size': predefined_args['hidden_size'],
'layer_norm_eps': predefined_args['layer_norm_eps'],
'max_position_embeddings': predefined_args['max_length'],
'model_type': 'bort',
'num_attention_heads': predefined_args['num_heads'],
'num_hidden_layers': predefined_args['num_layers'],
'pad_token_id': 1, # 2 = BERT, 1 = RoBERTa
'type_vocab_size': 1, # 2 = BERT, 1 = RoBERTa
'vocab_size': len(a ),
}
    hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
    hf_bort_model = BertForMaskedLM(hf_bort_config)
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))
# Check param shapes and map new HF param back
    def check_and_map_params(hf_param, gluon_param):
        shape_hf = hf_param.shape

        gluon_param = to_torch(params[gluon_param])
        shape_gluon = gluon_param.shape

        assert (
            shape_hf == shape_gluon
        ), f"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"

        return gluon_param
    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight, "word_embed.0.weight"
    )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight, "encoder.position_weight"
    )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias, "encoder.layer_norm.beta"
    )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight, "encoder.layer_norm.gamma"
    )

    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data
    )
    for i in range(hf_bort_config.num_hidden_layers):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]

        # self attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.bias"
        )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.weight"
        )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.bias"
        )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.weight"
        )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.bias"
        )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.weight"
        )

        # self attention output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias, f"encoder.transformer_cells.{i}.proj.bias"
        )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight, f"encoder.transformer_cells.{i}.proj.weight"
        )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.layer_norm.beta"
        )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.layer_norm.gamma"
        )

        # intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_1.bias"
        )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_1.weight"
        )

        # output
        bert_output: BertOutput = layer.output

        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_2.bias"
        )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_2.weight"
        )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.ffn.layer_norm.beta"
        )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.ffn.layer_norm.gamma"
        )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained("roberta-base")

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT)["input_ids"]

    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids])
    output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])

    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path)
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
    hf_bort_model.eval()

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT, return_tensors="pt")
    output_hf = hf_bort_model(**input_ids)[0]

    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()

    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
    success = np.allclose(gluon_layer, hf_layer, atol=1e-3)

    if success:
        print("✔️ Both model do output the same tensors")
    else:
        print("❌ Both model do **NOT** output the same tensors")
        print("Absolute difference is:", max_absolute_diff)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bort_checkpoint_path''', default=None, type=str, required=True, help='''Path the official Bort params file.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=10,
        hop_length=160,
        chunk_length=8,
        padding_value=0.0,
        sampling_rate=4000,
        return_attention_mask=False,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)
    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]

        encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 80, 3000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))

    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]

        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
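
# --- Added usage sketch (assumes transformers' WhisperFeatureExtractor API) --
if __name__ == "__main__":
    fe = WhisperFeatureExtractor()
    fake_audio = np.random.rand(16000).astype(np.float64)  # ~1 s of noise at 16 kHz
    feats = fe(fake_audio, sampling_rate=16000, return_tensors="np").input_features
    print(feats.shape)  # (1, 80, 3000): 80 log-mel bins over 30 s of padded frames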
"""simple docstring"""
from __future__ import annotations
class XORCipher:
    """simple docstring"""

    def __init__(self, key: int = 0):
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: str, key: int) -> list[str]:
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        ans = ""

        for ch in content:
            ans += chr(ord(ch) ^ key)

        return ans

    def decrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        ans = ""

        for ch in content:
            ans += chr(ord(ch) ^ key)

        return ans

    def encrypt_file(self, file: str, key: int = 0) -> bool:
        assert isinstance(file, str) and isinstance(key, int)

        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))

        except OSError:
            return False

        return True

    def decrypt_file(self, file: str, key: int) -> bool:
        assert isinstance(file, str) and isinstance(key, int)

        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))

        except OSError:
            return False

        return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = Namespace(**checkpoint["cfg"]["model"])
    state_dict = checkpoint["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}

    config = XGLMConfig(
        vocab_size=vocab_size,
        max_position_embeddings=args.max_target_positions,
        num_layers=args.decoder_layers,
        attention_heads=args.decoder_attention_heads,
        ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.decoder_embed_dim,
        layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="gelu",
        scale_embedding=not args.no_scale_embedding,
        tie_word_embeddings=args.share_decoder_input_output_embed,
    )

    model = XGLMForCausalLM(config)
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)

    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""fairseq_path""", type=str, help="""path to a model.pt on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
    ClapTextConfig,
    ClapTextModelWithProjection,
    RobertaTokenizer,
    SpeechT5HifiGan,
    SpeechT5HifiGanConfig,
)
from diffusers import (
    AudioLDMPipeline,
    AutoencoderKL,
    DDIMScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class AudioLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = AudioLDMPipeline
    params = TEXT_TO_AUDIO_PARAMS
    batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
    required_optional_params = frozenset(
[
'''num_inference_steps''',
'''num_waveforms_per_prompt''',
'''generator''',
'''latents''',
'''output_type''',
'''return_dict''',
'''callback''',
'''callback_steps''',
] )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=(32, 64),
            class_embed_type="simple_projection", projection_class_embeddings_input_dim=32,
            class_embeddings_concat=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear",
            clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=1, out_channels=1,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = ClapTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, projection_dim=32,
        )
        text_encoder = ClapTextModelWithProjection(text_encoder_config)
        tokenizer = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta", model_max_length=77)

        vocoder_config = SpeechT5HifiGanConfig(
            model_in_dim=8, sampling_rate=16000, upsample_initial_channel=16, upsample_rates=[2, 2],
            upsample_kernel_sizes=[4, 4], resblock_kernel_sizes=[3, 7],
            resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]], normalize_before=False,
        )
        vocoder = SpeechT5HifiGan(vocoder_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "vocoder": vocoder,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
        }
        return inputs
    def test_audioldm_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(**inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033]
        )

        assert np.abs(audio_slice - expected_slice).max() < 1e-2
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = AudioLDMPipeline(**_snake_case )
_lowerCAmelCase = audioldm_pipe.to(_snake_case )
_lowerCAmelCase = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
_lowerCAmelCase = self.get_dummy_inputs(_snake_case )
_lowerCAmelCase = 3 * [inputs["""prompt"""]]
# forward
_lowerCAmelCase = audioldm_pipe(**_snake_case )
_lowerCAmelCase = output.audios[0]
_lowerCAmelCase = self.get_dummy_inputs(_snake_case )
_lowerCAmelCase = 3 * [inputs.pop("""prompt""" )]
_lowerCAmelCase = audioldm_pipe.tokenizer(
_snake_case , padding="""max_length""" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=_snake_case , return_tensors="""pt""" , )
_lowerCAmelCase = text_inputs["""input_ids"""].to(_snake_case )
_lowerCAmelCase = audioldm_pipe.text_encoder(
_snake_case , )
_lowerCAmelCase = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
_lowerCAmelCase = F.normalize(_snake_case , dim=-1 )
_lowerCAmelCase = prompt_embeds
# forward
_lowerCAmelCase = audioldm_pipe(**_snake_case )
_lowerCAmelCase = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1e-2
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = AudioLDMPipeline(**_snake_case )
_lowerCAmelCase = audioldm_pipe.to(_snake_case )
_lowerCAmelCase = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
_lowerCAmelCase = self.get_dummy_inputs(_snake_case )
_lowerCAmelCase = 3 * ["""this is a negative prompt"""]
_lowerCAmelCase = negative_prompt
_lowerCAmelCase = 3 * [inputs["""prompt"""]]
# forward
_lowerCAmelCase = audioldm_pipe(**_snake_case )
_lowerCAmelCase = output.audios[0]
_lowerCAmelCase = self.get_dummy_inputs(_snake_case )
_lowerCAmelCase = 3 * [inputs.pop("""prompt""" )]
_lowerCAmelCase = []
for p in [prompt, negative_prompt]:
_lowerCAmelCase = audioldm_pipe.tokenizer(
_snake_case , padding="""max_length""" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=_snake_case , return_tensors="""pt""" , )
_lowerCAmelCase = text_inputs["""input_ids"""].to(_snake_case )
_lowerCAmelCase = audioldm_pipe.text_encoder(
_snake_case , )
_lowerCAmelCase = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
_lowerCAmelCase = F.normalize(_snake_case , dim=-1 )
embeds.append(_snake_case )
_lowerCAmelCase , _lowerCAmelCase = embeds
# forward
_lowerCAmelCase = audioldm_pipe(**_snake_case )
_lowerCAmelCase = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1e-2
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = PNDMScheduler(skip_prk_steps=_snake_case )
_lowerCAmelCase = AudioLDMPipeline(**_snake_case )
_lowerCAmelCase = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
_lowerCAmelCase = self.get_dummy_inputs(_snake_case )
_lowerCAmelCase = """egg cracking"""
_lowerCAmelCase = audioldm_pipe(**_snake_case , negative_prompt=_snake_case )
_lowerCAmelCase = output.audios[0]
assert audio.ndim == 1
assert len(_snake_case ) == 256
_lowerCAmelCase = audio[:10]
_lowerCAmelCase = np.array(
[-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032] )
assert np.abs(audio_slice - expected_slice ).max() < 1e-2
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = PNDMScheduler(skip_prk_steps=_snake_case )
_lowerCAmelCase = AudioLDMPipeline(**_snake_case )
_lowerCAmelCase = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
_lowerCAmelCase = """A hammer hitting a wooden surface"""
# test num_waveforms_per_prompt=1 (default)
_lowerCAmelCase = audioldm_pipe(_snake_case , num_inference_steps=2 ).audios
assert audios.shape == (1, 256)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
_lowerCAmelCase = 2
_lowerCAmelCase = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 256)
# test num_waveforms_per_prompt for single prompt
_lowerCAmelCase = 2
_lowerCAmelCase = audioldm_pipe(_snake_case , num_inference_steps=2 , num_waveforms_per_prompt=_snake_case ).audios
assert audios.shape == (num_waveforms_per_prompt, 256)
# test num_waveforms_per_prompt for batch of prompts
_lowerCAmelCase = 2
_lowerCAmelCase = audioldm_pipe(
[prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=_snake_case ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = AudioLDMPipeline(**_snake_case )
_lowerCAmelCase = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
_lowerCAmelCase = audioldm_pipe.vocoder.config.sampling_rate
_lowerCAmelCase = self.get_dummy_inputs(_snake_case )
_lowerCAmelCase = audioldm_pipe(audio_length_in_s=0.016 , **_snake_case )
_lowerCAmelCase = output.audios[0]
assert audio.ndim == 1
assert len(_snake_case ) / vocoder_sampling_rate == 0.016
_lowerCAmelCase = audioldm_pipe(audio_length_in_s=0.032 , **_snake_case )
_lowerCAmelCase = output.audios[0]
assert audio.ndim == 1
assert len(_snake_case ) / vocoder_sampling_rate == 0.032
    def test_audioldm_vocoder_model_in_dim(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = ["hey"]

        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        assert audio_shape == (1, 256)

        config = audioldm_pipe.vocoder.config
        config.model_in_dim *= 2
        audioldm_pipe.vocoder = SpeechT5HifiGan(config).to(torch_device)
        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
        assert audio_shape == (1, 256)
def snake_case ( self ):
"""simple docstring"""
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=_snake_case )
def snake_case ( self ):
"""simple docstring"""
self._test_inference_batch_single_identical(test_mean_pixel_difference=_snake_case )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def snake_case ( self ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_snake_case )
@slow
class AudioLDMPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
def snake_case ( self , _snake_case , _snake_case="cpu" , _snake_case=torch.floataa , _snake_case=0 ):
"""simple docstring"""
_lowerCAmelCase = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
_lowerCAmelCase = np.random.RandomState(_snake_case ).standard_normal((1, 8, 128, 16) )
_lowerCAmelCase = torch.from_numpy(_snake_case ).to(device=_snake_case , dtype=_snake_case )
_lowerCAmelCase = {
"""prompt""": """A hammer hitting a wooden surface""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 2.5,
}
return inputs
    def test_audioldm(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        inputs["num_inference_steps"] = 25
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[77230:77240]
        expected_slice = np.array(
            [-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315]
        )
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 1e-2
    def test_audioldm_lms(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe.scheduler = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[27780:27790]
        expected_slice = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212])
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 3e-2
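# A minimal usage sketch (not part of the test suite, assuming the same
# pipeline API and "cvssp/audioldm" checkpoint exercised by the tests above):
#
#     import torch
#     from diffusers import AudioLDMPipeline
#
#     pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm").to("cuda")
#     audio = pipe("A hammer hitting a wooden surface", num_inference_steps=10).audios[0]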
| 82
| 1
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            pad_token_id=1,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = TFEsmModel(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "encoder_hidden_states": encoder_hidden_states,
            "encoder_attention_mask": encoder_attention_mask,
        }
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs, encoder_hidden_states=encoder_hidden_states)

        # Also check the case where encoder outputs are not passed
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFEsmForMaskedLM(config=config)
        result = model([input_ids, input_mask])
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEsmModel,
            "fill-mask": TFEsmForMaskedLM,
            "text-classification": TFEsmForSequenceClassification,
            "token-classification": TFEsmForTokenClassification,
            "zero-shot": TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_resize_token_embeddings(self):
        pass

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_save_load_after_resize_token_embeddings(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.921518, -10.589814, -6.4671307],
                    [-6.3967156, -13.911377, -1.1211915],
                    [-7.781247, -13.951557, -3.740592],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-2))

    @slow
    def test_inference_no_head(self):
        model = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.14443092, 0.54125327, 0.3247739],
                    [0.30340484, 0.00526676, 0.31077722],
                    [0.32278043, -0.24987096, 0.3414628],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
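# A minimal inference sketch (not part of the test suite, assuming the same
# "facebook/esm2_t6_8M_UR50D" checkpoint used by the integration tests above):
#
#     from transformers import AutoTokenizer, TFEsmForMaskedLM
#
#     tokenizer = AutoTokenizer.from_pretrained("facebook/esm2_t6_8M_UR50D")
#     model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
#     logits = model(**tokenizer("MKT<mask>AY", return_tensors="tf")).logits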
| 55
|
'''simple docstring'''
def equation(x: float) -> float:
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    # Bolzano theorem: a continuous function has a root between a and b
    # only if f(a) and f(b) have opposite signs.
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
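    # Worked check: the positive root of 10 - x*x is sqrt(10) ≈ 3.1623, so both
    # calls above converge to within the 0.01 bracket width of that root.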
| 55
| 1
|
"""simple docstring"""
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
    Wav2Vec2FeatureExtractor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]

    hf_config = UniSpeechSatConfig.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
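# Example invocation (paths below are hypothetical, for illustration only):
#   python convert_unispeech_sat_s3prl_checkpoint.py \
#       --base_model_name microsoft/unispeech-sat-base \
#       --config_path ./config.json \
#       --checkpoint_path ./s3prl_checkpoint.ckpt \
#       --model_dump_path ./converted_model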
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
)
parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 45
|
import gc
import math
import unittest
import torch
from diffusers import UNet2DModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
logger = logging.get_logger(__name__)
enable_full_determinism()
class Unet2DModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": (32, 64),
            "down_block_types": ("DownBlock2D", "AttnDownBlock2D"),
            "up_block_types": ("AttnUpBlock2D", "UpBlock2D"),
            "attention_head_dim": 3,
            "out_channels": 3,
            "in_channels": 3,
            "layers_per_block": 2,
            "sample_size": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
class UNetLDMModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 4
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (4, 32, 32)

    @property
    def output_shape(self):
        return (4, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "sample_size": 32,
            "in_channels": 4,
            "out_channels": 4,
            "layers_per_block": 2,
            "block_out_channels": (32, 64),
            "attention_head_dim": 32,
            "down_block_types": ("DownBlock2D", "DownBlock2D"),
            "up_block_types": ("UpBlock2D", "UpBlock2D"),
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)

        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate(self):
        model, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate_wont_change_results(self):
        model_accelerate, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model_accelerate.to(torch_device)
        model_accelerate.eval()

        noise = torch.randn(
            1,
            model_accelerate.config.in_channels,
            model_accelerate.config.sample_size,
            model_accelerate.config.sample_size,
            generator=torch.manual_seed(0),
        )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        arr_accelerate = model_accelerate(noise, time_step)["sample"]

        # two models don't need to stay in the device at the same time
        del model_accelerate
        torch.cuda.empty_cache()
        gc.collect()

        model_normal_load, _ = UNet2DModel.from_pretrained(
            "fusing/unet-ldm-dummy-update", output_loading_info=True, low_cpu_mem_usage=False
        )
        model_normal_load.to(torch_device)
        model_normal_load.eval()
        arr_normal_load = model_normal_load(noise, time_step)["sample"]

        assert torch_all_close(arr_accelerate, arr_normal_load, rtol=1e-3)

    def test_output_pretrained(self):
        model = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update")
        model.eval()
        model.to(torch_device)

        noise = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-3))
class NCSNppModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [10]).to(dtype=torch.int32, device=torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64, 64, 64],
            "in_channels": 3,
            "layers_per_block": 1,
            "out_channels": 3,
            "time_embedding_type": "fourier",
            "norm_eps": 1e-6,
            "mid_block_scale_factor": math.sqrt(2.0),
            "norm_num_groups": None,
            "down_block_types": [
                "SkipDownBlock2D",
                "AttnSkipDownBlock2D",
                "SkipDownBlock2D",
                "SkipDownBlock2D",
            ],
            "up_block_types": [
                "SkipUpBlock2D",
                "SkipUpBlock2D",
                "AttnSkipUpBlock2D",
                "SkipUpBlock2D",
            ],
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    @slow
    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        inputs = self.dummy_input
        noise = floats_tensor((4, 3) + (256, 256)).to(torch_device)
        inputs["sample"] = noise
        image = model(**inputs)

        assert image is not None, "Make sure output is not None"

    @slow
    def test_output_pretrained_ve_mid(self):
        model = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (256, 256)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -10980.7129, -20028.8535, 8148.2822, 2342.2905, 567.7608])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

    def test_output_pretrained_ve_large(self):
        model = UNet2DModel.from_pretrained("fusing/ncsnpp-ffhq-ve-dummy-update")
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

    def test_forward_with_norm_groups(self):
        # not required for this model
        pass
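# A minimal denoising sketch (not part of the test suite, reusing the dummy
# checkpoint id exercised by the tests above):
#
#     import torch
#     from diffusers import UNet2DModel
#
#     model = UNet2DModel.from_pretrained("fusing/ncsnpp-ffhq-ve-dummy-update")
#     sample = torch.ones(1, 3, 32, 32)
#     with torch.no_grad():
#         noise_pred = model(sample, torch.tensor([1e-4])).sample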
| 129
| 0
|
"""simple docstring"""
def kinetic_energy(mass: float, velocity: float) -> float:
    """
    Calculate the kinetic energy 0.5 * m * v^2; taking the absolute value of
    the velocity makes the result independent of the direction of motion.
    """
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
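# Worked example: a 10 kg body moving at 10 m/s carries
# 0.5 * 10 * 10**2 = 500.0 J, i.e. kinetic_energy(10, 10) == 500.0.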
| 365
|
"""simple docstring"""
import os
from pathlib import Path
def load_cuda_kernels():
    """Compile and load the MultiScaleDeformableAttention CUDA/CPU extension."""
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]

    load(
        "MultiScaleDeformableAttention",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
| 85
| 0
|
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def release_memory(*objects):
    """Set each passed object to None and flush the accelerator cache."""
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects


def should_reduce_batch_size(exception: Exception) -> bool:
    """Check whether the exception message indicates an out-of-memory condition."""
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False


def find_executable_batch_size(function=None, starting_batch_size=128):
    """Retry `function`, halving the batch size after every OOM failure."""
    if function is None:
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)

    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error
        if len(params) < (len(args) + 1):
            arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f"Batch size was passed into `{function.__name__}` as the first argument when called."
                f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`"
            )
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise

    return decorator
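# A minimal usage sketch: the decorated function must accept the batch size as
# its first argument; the decorator supplies it and halves it after each OOM.
#
#     @find_executable_batch_size(starting_batch_size=128)
#     def train(batch_size):
#         ...  # an OOM RuntimeError here triggers a retry with batch_size // 2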
| 157
|
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Map each element through the logistic function 1 / (1 + e^-x)."""
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    """Sigmoid Linear Unit (SiLU/swish): x * sigmoid(x), applied elementwise."""
    return vector * sigmoid(vector)
if __name__ == "__main__":
import doctest
doctest.testmod()
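# Worked example: sigmoid(0) = 0.5, so sigmoid_linear_unit(0) = 0 * 0.5 = 0;
# for large positive inputs the unit approaches the identity function.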
| 157
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_mbart": ["MBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "MBartConfig", "MBartOnnxConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mbart"] = ["MBartTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mbart_fast"] = ["MBartTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mbart"] = [
        "MBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MBartForCausalLM",
        "MBartForConditionalGeneration",
        "MBartForQuestionAnswering",
        "MBartForSequenceClassification",
        "MBartModel",
        "MBartPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mbart"] = [
        "TFMBartForConditionalGeneration",
        "TFMBartModel",
        "TFMBartPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_mbart"] = [
        "FlaxMBartForConditionalGeneration",
        "FlaxMBartForQuestionAnswering",
        "FlaxMBartForSequenceClassification",
        "FlaxMBartModel",
        "FlaxMBartPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 369
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 58
| 0
|
'''simple docstring'''
from math import factorial
def solution(n: int = 20) -> int:
    """Count monotonic lattice paths through an n x n grid: the central binomial C(2n, n)."""
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)))
if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution(20))
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number.")
| 215
|
'''simple docstring'''
from __future__ import annotations
import queue
class TreeNode:
    def __init__(self, data: int) -> None:
        self.data = data
        self.right: TreeNode | None = None
        self.left: TreeNode | None = None
def build_tree() -> TreeNode:
    print("\n********Press N to stop entering at any point of time********\n")
    check = input("Enter the value of the root node: ").strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f"Enter the left node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f"Enter the right node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    raise ValueError("Tree construction terminated unexpectedly")
def pre_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=",")
    pre_order(node.left)
    pre_order(node.right)


def in_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=",")
    in_order(node.right)


def post_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=",")


def level_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=",")
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)


def level_order_actual(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data, end=",")
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:
            q.put(node)


def pre_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=",")
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right


def in_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=",")
        n = n.right


def post_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack1, stack2 = [], []
    n = node
    stack1.append(n)
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left)
        if n.right:
            stack1.append(n.right)
        stack2.append(n)
    while stack2:  # pop up from stack2 will be the post order
        print(stack2.pop().data, end=",")


def prompt(s: str = "", width=50, char="*") -> str:
    if not s:
        return "\n" + width * char
    left, extra = divmod(width - len(s) - 2, 2)
    return f"{left * char} {s} {(left + extra) * char}"
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("""Binary Tree Traversals"""))
    node: TreeNode = build_tree()
print(prompt("""Pre Order Traversal"""))
pre_order(node)
print(prompt() + """\n""")
print(prompt("""In Order Traversal"""))
in_order(node)
print(prompt() + """\n""")
print(prompt("""Post Order Traversal"""))
post_order(node)
print(prompt() + """\n""")
print(prompt("""Level Order Traversal"""))
level_order(node)
print(prompt() + """\n""")
print(prompt("""Actual Level Order Traversal"""))
level_order_actual(node)
print("""*""" * 5_0 + """\n""")
print(prompt("""Pre Order Traversal - Iteration Version"""))
pre_order_iter(node)
print(prompt() + """\n""")
print(prompt("""In Order Traversal - Iteration Version"""))
in_order_iter(node)
print(prompt() + """\n""")
print(prompt("""Post Order Traversal - Iteration Version"""))
post_order_iter(node)
print(prompt())
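# Worked example: for the tree 1 -> (left 2, right 3), pre-order prints 1,2,3,
# in-order prints 2,1,3, post-order prints 2,3,1 and level order prints 1,2,3.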
| 215
| 1
|
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS = None
_NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
np.dtype('|b1'),
np.dtype('|u1'),
np.dtype('<u2'),
np.dtype('>u2'),
np.dtype('<i2'),
np.dtype('>i2'),
np.dtype('<u4'),
np.dtype('>u4'),
np.dtype('<i4'),
np.dtype('>i4'),
np.dtype('<f4'),
np.dtype('>f4'),
np.dtype('<f8'),
np.dtype('>f8'),
]
@dataclass
class Image:
    """Image feature: store image data as a {"bytes", "path"} struct."""

    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Image", init=False, repr=False)

    def __call__(self):
        return self.pa_type
    def encode_example(self, value) -> dict:
        """Encode a str path, bytes, np.ndarray or PIL image into a {"bytes", "path"} dict."""
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support encoding images, please install 'Pillow'.")

        if isinstance(value, list):
            value = np.array(value)

        if isinstance(value, str):
            return {"path": value, "bytes": None}
        elif isinstance(value, bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value, np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value, PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=None ) -> "PIL.Image.Image":
if not self.decode:
raise RuntimeError('''Decoding is disabled for this feature. Please use Image(decode=True) instead.''' )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support decoding images, please install \'Pillow\'.''' )
if token_per_repo_id is None:
UpperCAmelCase_ : str = {}
UpperCAmelCase_ : List[str] = value['''path'''], value['''bytes''']
if bytes_ is None:
if path is None:
raise ValueError(f'''An image should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
else:
if is_local_path(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : int = PIL.Image.open(_SCREAMING_SNAKE_CASE )
else:
UpperCAmelCase_ : int = path.split('''::''' )[-1]
try:
UpperCAmelCase_ : str = string_to_dict(_SCREAMING_SNAKE_CASE ,config.HUB_DATASETS_URL )['''repo_id''']
UpperCAmelCase_ : Optional[Any] = token_per_repo_id.get(_SCREAMING_SNAKE_CASE )
except ValueError:
UpperCAmelCase_ : Any = None
with xopen(_SCREAMING_SNAKE_CASE ,'''rb''' ,use_auth_token=_SCREAMING_SNAKE_CASE ) as f:
UpperCAmelCase_ : List[str] = BytesIO(f.read() )
UpperCAmelCase_ : Optional[int] = PIL.Image.open(bytes_ )
else:
UpperCAmelCase_ : Dict = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """If decodable, return the feature itself, otherwise flatten it into a {"bytes", "path"} dictionary."""
        from .features import Value

        return (
            self
            if self.decode
            else {
                "bytes": Value("binary"),
                "path": Value("string"),
            }
        )
    def cast_storage(self, storage) -> pa.StructArray:
        """Cast an Arrow array of strings, binaries, structs or lists to the Image struct type."""
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()],
                type=pa.binary(),
            )
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null()
            )
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        """Embed the file contents referenced by "path" into the "bytes" column."""

        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
def list_image_compression_formats() -> List[str]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS


def image_to_bytes(image: "PIL.Image.Image") -> bytes:
    """Convert a PIL Image object to bytes using native compression if possible, otherwise PNG/TIFF."""
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
    image.save(buffer, format=format)
    return buffer.getvalue()


def encode_pil_image(image: "PIL.Image.Image") -> dict:
    if hasattr(image, "filename") and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image)}


def encode_np_array(array: np.ndarray) -> dict:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize

    dest_dtype = None

    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1")
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays."
            )
        if dtype is not dest_dtype:
            warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
            if np.dtype(dtype_str) in _VALID_IMAGE_ARRAY_DTPYES:
                dest_dtype = np.dtype(dtype_str)
                warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}"
            )

    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}


def objects_to_list_of_image_dicts(objs) -> list:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
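# A minimal round-trip sketch (illustrative only, assuming Pillow is installed):
#
#     import PIL.Image
#
#     feature = Image()
#     encoded = feature.encode_example(PIL.Image.new("RGB", (8, 8)))  # {"bytes": ..., "path": None}
#     decoded = feature.decode_example(encoded)                       # back to a PIL.Image.Image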
| 366
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bloom_fast"] = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bloom"] = [
        "BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BloomForCausalLM",
        "BloomModel",
        "BloomPreTrainedModel",
        "BloomForSequenceClassification",
        "BloomForTokenClassification",
        "BloomForQuestionAnswering",
    ]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 235
| 0
|
'''simple docstring'''
import numpy
class TwoHiddenLayerNeuralNetwork:
    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None:
        self.input_array = input_array
        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.
        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4
        )
        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)
        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)
        # Real output values provided.
        self.output_array = output_array
        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)
    def feedforward(self) -> numpy.ndarray:
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )
        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return self.layer_between_second_hidden_layer_and_output
    def back_propagation(self) -> None:
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )

        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )
    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")

    def predict(self, input_arr: numpy.ndarray) -> int:
        self.array = input_arr
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return int(self.layer_between_second_hidden_layer_and_output > 0.6)
def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    return (value) * (1 - (value))
def example() -> int:
    # Input values.
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )
    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)
    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(input_array=test_input, output_array=output)
    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)
    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))
if __name__ == "__main__":
example()
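# Note: the target column above is the XOR/parity of the three input bits, so
# example() trains the network on 3-bit parity and then predicts the class of
# (1, 1, 1), whose parity is 1.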
| 55
|
'''simple docstring'''
a_ : str = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
a_ : Any = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
a_ : int = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 55
| 1
|
"""simple docstring"""
def get_set_bits_count(number: int) -> int:
    """Count the set bits (1s) in the binary representation of a non-negative integer."""
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")
    count = 0
while number:
# This way we arrive at next set bit (next 1) instead of looping
# through each bit and checking for 1s hence the
# loop won't run 32 times it will only run the number of `1` times
number &= number - 1
count += 1
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
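    # A small worked example (added for illustration): 13 is 0b1101, and each
    # `number &= number - 1` step clears one set bit, 0b1101 -> 0b1100 ->
    # 0b1000 -> 0b0000, so the function returns 3.
    assert get_set_bits_count(13) == 3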
| 350
|
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained(self):
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)
        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample
        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample
        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            sample = self.dummy_sample
            residual = 0.1 * sample
            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]
            output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
            output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(10)
        assert torch.equal(
            scheduler.timesteps,
            torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]
            ),
        )
    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)
    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)
    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_time_indices(self):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t)
    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps)
    def test_pow_of_3_inference_steps(self):
        num_inference_steps = 27
        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                sample = scheduler.step_prk(residual, t, sample).prev_sample
    def test_inference_plms_no_past_residuals(self):
        with self.assertRaises(ValueError):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 198.1318) < 1e-2
        assert abs(result_mean.item() - 0.2580) < 1e-3
    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 67.3986) < 1e-2
        assert abs(result_mean.item() - 0.0878) < 1e-3
    def test_full_loop_with_set_alpha_to_one(self):
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 230.0399) < 1e-2
        assert abs(result_mean.item() - 0.2995) < 1e-3
    def test_full_loop_with_no_set_alpha_to_one(self):
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 186.9482) < 1e-2
        assert abs(result_mean.item() - 0.2434) < 1e-3
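# Minimal usage sketch (added for illustration, not part of the test suite):
# outside the tests, PNDMScheduler is usually driven through its generic
# `step()` entry point, which dispatches to the PRK/PLMS phases exercised above.
#
#   scheduler = PNDMScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(50)
#   sample = torch.randn(1, 3, 32, 32)
#   for t in scheduler.timesteps:
#       residual = model(sample, t)  # `model` is a stand-in for a trained UNet
#       sample = scheduler.step(residual, t, sample).prev_sample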
| 148
| 0
|
import argparse
from t5x import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeq2SeqLM
def convert_tax_checkpoint_to_flax(tax_checkpoint_path, config_name, flax_dump_folder_path):
    config = AutoConfig.from_pretrained(config_name)
    flax_model = FlaxAutoModelForSeq2SeqLM.from_config(config=config)
    tax_model = checkpoints.load_t5x_checkpoint(tax_checkpoint_path)
    split_mlp_wi = "wi_0" in tax_model["target"]["encoder"]["layers_0"]["mlp"]
if config.model_type == "t5":
SCREAMING_SNAKE_CASE_: str = "SelfAttention"
if config.model_type == "longt5" and config.encoder_attention_type == "local":
SCREAMING_SNAKE_CASE_: Any = "LocalSelfAttention"
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
SCREAMING_SNAKE_CASE_: Any = "TransientGlobalSelfAttention"
else:
raise ValueError(
"Given config is expected to have `model_type='t5'`, or `model_type='longt5` with `encoder_attention_type`"
" attribute with a value from ['local', 'transient-global]." )
# Encoder
for layer_index in range(config.num_layers ):
SCREAMING_SNAKE_CASE_: int = f"layers_{str(_UpperCAmelCase )}"
# Self-Attention
SCREAMING_SNAKE_CASE_: Optional[int] = tax_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
SCREAMING_SNAKE_CASE_: int = tax_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
SCREAMING_SNAKE_CASE_: Dict = tax_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
SCREAMING_SNAKE_CASE_: str = tax_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
SCREAMING_SNAKE_CASE_: int = tax_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]
# Layer Normalization
SCREAMING_SNAKE_CASE_: Any = tax_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]
if split_mlp_wi:
SCREAMING_SNAKE_CASE_: int = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
SCREAMING_SNAKE_CASE_: Optional[Any] = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
else:
SCREAMING_SNAKE_CASE_: Optional[int] = tax_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]
SCREAMING_SNAKE_CASE_: Dict = tax_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]
# Layer Normalization
SCREAMING_SNAKE_CASE_: Dict = tax_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
# Assigning
SCREAMING_SNAKE_CASE_: List[str] = flax_model.params["encoder"]["block"][str(_UpperCAmelCase )]["layer"]
SCREAMING_SNAKE_CASE_: Any = tax_attention_key
SCREAMING_SNAKE_CASE_: str = tax_attention_out
SCREAMING_SNAKE_CASE_: Union[str, Any] = tax_attention_query
SCREAMING_SNAKE_CASE_: Optional[int] = tax_attention_value
SCREAMING_SNAKE_CASE_: Any = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
SCREAMING_SNAKE_CASE_: Optional[Any] = tax_global_layer_norm
if split_mlp_wi:
SCREAMING_SNAKE_CASE_: Any = tax_mlp_wi_a
SCREAMING_SNAKE_CASE_: Any = tax_mlp_wi_a
else:
SCREAMING_SNAKE_CASE_: Optional[int] = tax_mlp_wi
SCREAMING_SNAKE_CASE_: str = tax_mlp_wo
SCREAMING_SNAKE_CASE_: int = tax_mlp_layer_norm
SCREAMING_SNAKE_CASE_: int = flax_model_encoder_layer_block
# Only for layer 0:
SCREAMING_SNAKE_CASE_: Tuple = tax_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
SCREAMING_SNAKE_CASE_: List[str] = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
SCREAMING_SNAKE_CASE_: List[Any] = tax_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
SCREAMING_SNAKE_CASE_: Any = tax_encoder_global_rel_embedding
# Assigning
SCREAMING_SNAKE_CASE_: List[str] = tax_model["target"]["encoder"]["encoder_norm"]["scale"]
SCREAMING_SNAKE_CASE_: Any = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
SCREAMING_SNAKE_CASE_: int = f"layers_{str(_UpperCAmelCase )}"
# Self-Attention
SCREAMING_SNAKE_CASE_: str = tax_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
SCREAMING_SNAKE_CASE_: List[str] = tax_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
SCREAMING_SNAKE_CASE_: List[str] = tax_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
SCREAMING_SNAKE_CASE_: Optional[Any] = tax_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]
# Layer Normalization
SCREAMING_SNAKE_CASE_: int = tax_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
"scale"
]
# Encoder-Decoder-Attention
SCREAMING_SNAKE_CASE_: List[Any] = tax_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
SCREAMING_SNAKE_CASE_: Any = tax_enc_dec_attention_module["key"]["kernel"]
SCREAMING_SNAKE_CASE_: int = tax_enc_dec_attention_module["out"]["kernel"]
SCREAMING_SNAKE_CASE_: Tuple = tax_enc_dec_attention_module["query"]["kernel"]
SCREAMING_SNAKE_CASE_: List[str] = tax_enc_dec_attention_module["value"]["kernel"]
# Layer Normalization
SCREAMING_SNAKE_CASE_: Union[str, Any] = tax_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]
# MLP
if split_mlp_wi:
SCREAMING_SNAKE_CASE_: Any = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
SCREAMING_SNAKE_CASE_: Optional[Any] = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
else:
SCREAMING_SNAKE_CASE_: int = tax_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]
SCREAMING_SNAKE_CASE_: int = tax_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]
# Layer Normalization
SCREAMING_SNAKE_CASE_: Union[str, Any] = tax_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
# Assigning
SCREAMING_SNAKE_CASE_: List[str] = flax_model.params["decoder"]["block"][str(_UpperCAmelCase )]["layer"]
SCREAMING_SNAKE_CASE_: Optional[int] = tax_attention_key
SCREAMING_SNAKE_CASE_: Optional[Any] = tax_attention_out
SCREAMING_SNAKE_CASE_: List[Any] = tax_attention_query
SCREAMING_SNAKE_CASE_: Optional[int] = tax_attention_value
SCREAMING_SNAKE_CASE_: List[Any] = tax_pre_attention_layer_norm
SCREAMING_SNAKE_CASE_: Optional[int] = tax_enc_dec_attention_key
SCREAMING_SNAKE_CASE_: List[Any] = tax_enc_dec_attention_out
SCREAMING_SNAKE_CASE_: List[Any] = tax_enc_dec_attention_query
SCREAMING_SNAKE_CASE_: Optional[Any] = tax_enc_dec_attention_value
SCREAMING_SNAKE_CASE_: int = tax_cross_layer_norm
if split_mlp_wi:
SCREAMING_SNAKE_CASE_: Optional[int] = tax_mlp_wi_a
SCREAMING_SNAKE_CASE_: Any = tax_mlp_wi_a
else:
SCREAMING_SNAKE_CASE_: Any = tax_mlp_wi
SCREAMING_SNAKE_CASE_: Optional[Any] = tax_mlp_wo
        SCREAMING_SNAKE_CASE_: Optional[int] = tax_mlp_layer_norm
SCREAMING_SNAKE_CASE_: Optional[Any] = flax_model_decoder_layer_block
# Decoder Normalization
SCREAMING_SNAKE_CASE_: Optional[int] = tax_model["target"]["decoder"]["decoder_norm"]["scale"]
SCREAMING_SNAKE_CASE_: Any = txa_decoder_norm
# Only for layer 0:
SCREAMING_SNAKE_CASE_: Tuple = tax_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
SCREAMING_SNAKE_CASE_: Any = tax_decoder_rel_embedding
# Token Embeddings
SCREAMING_SNAKE_CASE_: Dict = tax_model["target"]["token_embedder"]["embedding"]
SCREAMING_SNAKE_CASE_: Optional[Any] = txa_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
SCREAMING_SNAKE_CASE_: Optional[int] = tax_model["target"]["decoder"]["logits_dense"]["kernel"]
    flax_model.save_pretrained(flax_dump_folder_path)
    print("T5X Model was successfully converted!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path the T5X checkpoint."""
)
parser.add_argument("""--config_name""", default=None, type=str, required=True, help="""Config name of LongT5/T5 model.""")
parser.add_argument(
"""--flax_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output FLAX model."""
)
    args = parser.parse_args()
    convert_tax_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
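# Example invocation (illustrative; the script name and paths are placeholders):
#   python convert_t5x_checkpoint_to_flax.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint \
#       --config_name google/long-t5-local-base \
#       --flax_dump_folder_path ./flax_dump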
| 13
|
'''simple docstring'''
import warnings
from functools import wraps
from typing import Callable
def experimental(fn: Callable):
    '''Decorator warning that the wrapped callable is experimental and may change.'''
    @wraps(fn)
    def _inner_fn(*args: Optional[int], **kwargs: List[Any]):
        warnings.warn(
            (f'\'{fn.__name__}\' is experimental and might be subject to breaking changes in the future.'), UserWarning, )
        return fn(*args, **kwargs)
    return _inner_fn
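# Usage sketch (added for illustration):
#
#   @experimental
#   def my_new_api():
#       ...
#
#   my_new_api()  # emits a UserWarning that the API may change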
| 85
| 0
|
'''simple docstring'''
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r'digital_image_processing/image_data/lena_small.jpg')
gray = cvtColor(img, COLOR_BGR2GRAY)
def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()
def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at")
def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()
def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()
def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()
def test_convolve_filter():
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()
def test_median_filter():
    assert med.median_filter(gray, 3).any()
def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()
def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()
def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()
def test_nearest_neighbour(
    file_path: str = "digital_image_processing/image_data/lena_small.jpg",
):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()
def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"
    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)
    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)
    assert neighbors_pixels is not None
    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))
    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)
    assert lbp_image.any()
| 359
|
'''simple docstring'''
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
SCREAMING_SNAKE_CASE_: List[str] =get_logger()
SCREAMING_SNAKE_CASE_: Optional[dict] =None
class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device
        if isinstance(device, Device):
            raise ValueError(
                f"""Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` """
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`.")
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f"""Device with string identifier {self.device} not listed among the available """
                f"""devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default """
                f"""device: {str(jax.devices()[0])}.""")
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs
    @staticmethod
    def _map_devices_to_str() -> dict:
        import jax
        return {str(device): device for device in jax.devices()}
    def _consolidate(self, column):
        import jax
        import jax.numpy as jnp
        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column):
                return jnp.stack(column, axis=0)
        return column
    def _tensorize(self, value):
        import jax
        import jax.numpy as jnp
        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()
        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image
            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})
    def _recursive_tensorize(self, data_struct):
        import jax
        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch
            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)
    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)
    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)
    def format_column(self, pa_table: pa.Table) -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column
    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
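# Usage sketch (added for illustration): in the `datasets` library this
# formatter backs `Dataset.with_format("jax")`, e.g.
#
#   ds = load_dataset("mnist", split="train").with_format("jax")
#   batch = ds[:8]  # column values come back as jax.Array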
| 106
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_groupvit''': [
'''GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''GroupViTConfig''',
'''GroupViTOnnxConfig''',
'''GroupViTTextConfig''',
'''GroupViTVisionConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_groupvit"] = [
'''GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GroupViTModel''',
'''GroupViTPreTrainedModel''',
'''GroupViTTextModel''',
'''GroupViTVisionModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_groupvit"] = [
'''TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFGroupViTModel''',
'''TFGroupViTPreTrainedModel''',
'''TFGroupViTTextModel''',
'''TFGroupViTVisionModel''',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 113
|
'''simple docstring'''
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
    parser = argparse.ArgumentParser()
parser.add_argument("""--user""", type=str, default="""ubuntu""")
parser.add_argument("""--host""", type=str, default="""localhost""")
parser.add_argument("""--key_path""", type=str, default=None)
parser.add_argument("""--instance""", type=str, default="""V100:1""")
parser.add_argument("""--provider""", type=str, default="""cheapest""")
parser.add_argument("""--use_spot""", type=bool, default=False)
parser.add_argument("""--example""", type=str, default="""pytorch/text-generation/run_generation.py""")
    args, unknown = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError("""Cannot specify both BYO and on-demand cluster args""")
        cluster = rh.cluster(
name="""rh-cluster""", ips=[args.host], ssh_creds={"""ssh_user""": args.user, """ssh_private_key""": args.key_path}
)
else:
        cluster = rh.cluster(
name="""rh-cluster""", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
)
    example_dir = args.example.rsplit("""/""", 1)[0]
# Set up remote environment
cluster.install_packages(["""pip:./"""]) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([f"""pip install -r transformers/examples/{example_dir}/requirements.txt"""])
cluster.run(["""pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"""])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([f"""python transformers/examples/{args.example} {" ".join(shlex.quote(arg) for arg in unknown)}"""])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
| 58
| 0
|
def bfs(graph, source, sink, parent):
    """Return True if there is an augmenting path from source to sink; fill `parent`."""
    visited = [False] * len(graph)
    queue = []
    queue.append(source)
    visited[source] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[sink]
def ford_fulkerson(graph, source, sink):
    """Compute the maximum flow from source to sink in the given capacity matrix."""
    parent = [-1] * (len(graph))
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float('Inf')
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
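# For the capacity matrix above (the classic CLRS flow network), the maximum
# flow from node 0 to node 5 is 23, so the line above prints 23.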
| 200
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_xlm_roberta_xl""": [
"""XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XLMRobertaXLConfig""",
"""XLMRobertaXLOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta_xl"] = [
"""XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMRobertaXLForCausalLM""",
"""XLMRobertaXLForMaskedLM""",
"""XLMRobertaXLForMultipleChoice""",
"""XLMRobertaXLForQuestionAnswering""",
"""XLMRobertaXLForSequenceClassification""",
"""XLMRobertaXLForTokenClassification""",
"""XLMRobertaXLModel""",
"""XLMRobertaXLPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 200
| 1
|
def equation(x: float) -> float:
    return 10 - x * x
def bisection(a: float, b: float) -> float:
    if equation(a) * equation(b) >= 0:
        raise ValueError('''Wrong space!''')
    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
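# Both calls above bracket the positive root of 10 - x * x, so each converges
# to sqrt(10) (about 3.16) within the 0.01 tolerance.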
| 330
|
__version__ = '''0.18.2'''
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
    from .models import (
        AutoencoderKL,
        ControlNetModel,
        ModelMixin,
        PriorTransformer,
        T5FilmDecoder,
        Transformer2DModel,
        UNet1DModel,
        UNet2DConditionModel,
        UNet2DModel,
        UNet3DConditionModel,
        VQModel,
    )
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
        KDPM2AncestralDiscreteScheduler,
        KDPM2DiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
    from .models.unet_2d_condition_flax import FlaxUNet2DConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
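# Note (added for clarity): the try/except blocks above implement diffusers'
# optional-dependency pattern. When a backend is missing, dummy placeholder
# objects are imported in its place, so `import diffusers` always succeeds and
# a helpful error is raised only when the unavailable feature is actually used.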
| 235
| 0
|
"""simple docstring"""
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder
MODEL = "base_with_context"
def load_notes_encoder(weights, model):
UpperCAmelCase : Optional[Any] = nn.Parameter(torch.FloatTensor(weights["""token_embedder"""]["""embedding"""] ) )
UpperCAmelCase : Any = nn.Parameter(
torch.FloatTensor(weights["""Embed_0"""]["""embedding"""] ) , requires_grad=UpperCamelCase )
for lyr_num, lyr in enumerate(model.encoders ):
UpperCAmelCase : List[Any] = weights[F"layers_{lyr_num}"]
UpperCAmelCase : Tuple = nn.Parameter(
torch.FloatTensor(ly_weight["""pre_attention_layer_norm"""]["""scale"""] ) )
UpperCAmelCase : Optional[Any] = ly_weight["""attention"""]
UpperCAmelCase : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
UpperCAmelCase : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
UpperCAmelCase : Tuple = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
UpperCAmelCase : Any = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
UpperCAmelCase : Union[str, Any] = nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) )
UpperCAmelCase : int = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) )
UpperCAmelCase : int = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) )
UpperCAmelCase : Union[str, Any] = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) )
UpperCAmelCase : Optional[int] = nn.Parameter(torch.FloatTensor(weights["""encoder_norm"""]["""scale"""] ) )
return model
def load_continuous_encoder(weights, model):
UpperCAmelCase : Tuple = nn.Parameter(torch.FloatTensor(weights["""input_proj"""]["""kernel"""].T ) )
UpperCAmelCase : Optional[int] = nn.Parameter(
torch.FloatTensor(weights["""Embed_0"""]["""embedding"""] ) , requires_grad=UpperCamelCase )
for lyr_num, lyr in enumerate(model.encoders ):
UpperCAmelCase : int = weights[F"layers_{lyr_num}"]
UpperCAmelCase : List[str] = ly_weight["""attention"""]
UpperCAmelCase : Any = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
UpperCAmelCase : Tuple = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
UpperCAmelCase : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
UpperCAmelCase : Any = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
UpperCAmelCase : str = nn.Parameter(
torch.FloatTensor(ly_weight["""pre_attention_layer_norm"""]["""scale"""] ) )
UpperCAmelCase : Dict = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) )
UpperCAmelCase : Dict = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) )
UpperCAmelCase : List[str] = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) )
UpperCAmelCase : Tuple = nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) )
UpperCAmelCase : Optional[Any] = nn.Parameter(torch.FloatTensor(weights["""encoder_norm"""]["""scale"""] ) )
return model
def load_decoder(weights, model):
UpperCAmelCase : List[Any] = nn.Parameter(torch.FloatTensor(weights["""time_emb_dense0"""]["""kernel"""].T ) )
UpperCAmelCase : Tuple = nn.Parameter(torch.FloatTensor(weights["""time_emb_dense1"""]["""kernel"""].T ) )
UpperCAmelCase : Optional[int] = nn.Parameter(
torch.FloatTensor(weights["""Embed_0"""]["""embedding"""] ) , requires_grad=UpperCamelCase )
UpperCAmelCase : Union[str, Any] = nn.Parameter(
torch.FloatTensor(weights["""continuous_inputs_projection"""]["""kernel"""].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
UpperCAmelCase : Dict = weights[F"layers_{lyr_num}"]
UpperCAmelCase : Any = nn.Parameter(
torch.FloatTensor(ly_weight["""pre_self_attention_layer_norm"""]["""scale"""] ) )
UpperCAmelCase : Dict = nn.Parameter(
torch.FloatTensor(ly_weight["""FiLMLayer_0"""]["""DenseGeneral_0"""]["""kernel"""].T ) )
UpperCAmelCase : Union[str, Any] = ly_weight["""self_attention"""]
UpperCAmelCase : int = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
UpperCAmelCase : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
UpperCAmelCase : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
UpperCAmelCase : List[str] = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
UpperCAmelCase : Union[str, Any] = ly_weight["""MultiHeadDotProductAttention_0"""]
UpperCAmelCase : str = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
UpperCAmelCase : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
UpperCAmelCase : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
UpperCAmelCase : str = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
UpperCAmelCase : Optional[int] = nn.Parameter(
torch.FloatTensor(ly_weight["""pre_cross_attention_layer_norm"""]["""scale"""] ) )
UpperCAmelCase : Union[str, Any] = nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) )
UpperCAmelCase : Any = nn.Parameter(
torch.FloatTensor(ly_weight["""FiLMLayer_1"""]["""DenseGeneral_0"""]["""kernel"""].T ) )
UpperCAmelCase : Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) )
UpperCAmelCase : List[str] = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) )
UpperCAmelCase : List[Any] = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) )
UpperCAmelCase : str = nn.Parameter(torch.FloatTensor(weights["""decoder_norm"""]["""scale"""] ) )
UpperCAmelCase : Optional[Any] = nn.Parameter(torch.FloatTensor(weights["""spec_out_dense"""]["""kernel"""].T ) )
return model
def main(args):
    ta_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path)
    ta_checkpoint = jnp.tree_util.tree_map(onp.array, ta_checkpoint)
    gin_overrides = [
"""from __gin__ import dynamic_registration""",
"""from music_spectrogram_diffusion.models.diffusion import diffusion_utils""",
"""diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0""",
"""diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()""",
]
    gin_file = os.path.join(args.checkpoint_path , """..""" , """config.gin""" )
    gin_config = inference.parse_training_gin_file(gin_file, gin_overrides)
    synth_model = inference.InferenceModel(args.checkpoint_path, gin_config)
    scheduler = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" , variance_type="""fixed_large""" )
    notes_encoder = SpectrogramNotesEncoder(
max_length=synth_model.sequence_length["""inputs"""] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="""gated-gelu""" , )
    continuous_encoder = SpectrogramContEncoder(
input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length["""targets_context"""] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="""gated-gelu""" , )
    decoder = T5FilmDecoder(
input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length["""targets_context"""] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , )
    notes_encoder = load_notes_encoder(ta_checkpoint["""target"""]["""token_encoder"""] , notes_encoder )
    continuous_encoder = load_continuous_encoder(ta_checkpoint["""target"""]["""continuous_encoder"""] , continuous_encoder )
    decoder = load_decoder(ta_checkpoint["""target"""]["""decoder"""] , decoder )
    melgan = OnnxRuntimeModel.from_pretrained("""kashif/soundstream_mel_decoder""" )
    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder , continuous_encoder=continuous_encoder , decoder=decoder , scheduler=scheduler , melgan=melgan , )
if args.save:
pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--output_path", default=None, type=str, required=True, help="Path to the converted model.")
parser.add_argument(
"--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
)
parser.add_argument(
"--checkpoint_path",
default=f"""{MODEL}/checkpoint_500000""",
type=str,
required=False,
help="Path to the original jax model checkpoint.",
)
    args = parser.parse_args()
main(args)
| 76
|
"""simple docstring"""
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def make_transparency_mask(size, overlap_pixels, remove_borders=[]):
    size_x = size[0] - overlap_pixels * 2
    size_y = size[1] - overlap_pixels * 2
for letter in ["l", "r"]:
if letter in remove_borders:
size_x += overlap_pixels
for letter in ["t", "b"]:
if letter in remove_borders:
size_y += overlap_pixels
    mask = np.ones((size_y, size_x) , dtype=np.uint8 ) * 255
    mask = np.pad(mask , mode="""linear_ramp""" , pad_width=overlap_pixels , end_values=0 )
if "l" in remove_borders:
UpperCAmelCase : Dict = mask[:, overlap_pixels : mask.shape[1]]
if "r" in remove_borders:
UpperCAmelCase : Optional[Any] = mask[:, 0 : mask.shape[1] - overlap_pixels]
if "t" in remove_borders:
UpperCAmelCase : Any = mask[overlap_pixels : mask.shape[0], :]
if "b" in remove_borders:
UpperCAmelCase : str = mask[0 : mask.shape[0] - overlap_pixels, :]
return mask
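# Illustrative check of the mask above (values assumed for the example, not from the
# original file): for size=(8, 8) and overlap_pixels=2 with no borders removed, the
# result is an 8x8 uint8 array whose central 4x4 block is 255 and whose 2-pixel rim
# ramps linearly down to 0, so overlapping tiles cross-fade when pasted with mode="L".
#   mask = make_transparency_mask((8, 8), 2)
#   assert mask.shape == (8, 8) and mask[4, 4] == 255 and mask[0, 0] == 0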
def clamp(n, smallest, largest):
    return max(smallest, min(n, largest))
def clamp_rect(rect: [int], min: [int], max: [int]):
return (
clamp(rect[0] , min[0] , max[0] ),
clamp(rect[1] , min[1] , max[1] ),
clamp(rect[2] , min[0] , max[0] ),
clamp(rect[3] , min[1] , max[1] ),
)
def add_overlap_rect(rect: [int], overlap: int, image_size: [int]):
    rect = list(rect)
rect[0] -= overlap
rect[1] -= overlap
rect[2] += overlap
rect[3] += overlap
    rect = clamp_rect(rect, [0, 0], [image_size[0], image_size[1]])
return rect
def squeeze_tile(tile, original_image, original_slice, slice_x):
    result = Image.new("RGB", (tile.size[0] + original_slice, tile.size[1]))
result.paste(
original_image.resize((tile.size[0], tile.size[1]) , Image.BICUBIC ).crop(
(slice_x, 0, slice_x + original_slice, tile.size[1]) ) , (0, 0) , )
    result.paste(tile, (original_slice, 0))
return result
def unsqueeze_tile(tile, original_image_slice):
    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    tile = tile.crop(crop_rect)
    return tile
def next_divisible(n, d):
    divisor = n % d
    return n - divisor
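# Quick example of the helper above: next_divisible(130, 64) == 128, i.e. it rounds
# n down to the nearest multiple of d (n - n % d).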
class StableDiffusionTiledUpscalePipeline(StableDiffusionUpscalePipeline):
    def __init__(self, vae, text_encoder, tokenizer, unet, low_res_scheduler, scheduler, max_noise_level=350):
        '''simple docstring'''
        super().__init__(
            vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, max_noise_level=max_noise_level, )
    def _process_tile(self, original_image_slice, x, y, tile_size, tile_border, image, final_image, **kwargs):
'''simple docstring'''
torch.manual_seed(0 )
        crop_rect = (
            min(image.size[0] - (tile_size + original_image_slice), x * tile_size),
            min(image.size[1] - (tile_size + original_image_slice), y * tile_size),
            min(image.size[0], (x + 1) * tile_size),
            min(image.size[1], (y + 1) * tile_size),
        )
        crop_rect_with_overlap = add_overlap_rect(crop_rect, tile_border, image.size)
        tile = image.crop(crop_rect_with_overlap)
        translated_slice_x = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
        translated_slice_x = translated_slice_x - (original_image_slice / 2)
        translated_slice_x = max(0, translated_slice_x)
        to_input = squeeze_tile(tile, image, original_image_slice, translated_slice_x)
        orig_input_size = to_input.size
        to_input = to_input.resize((tile_size, tile_size), Image.BICUBIC)
        upscaled_tile = super(StableDiffusionTiledUpscalePipeline, self).__call__(image=to_input, **kwargs).images[0]
        upscaled_tile = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4), Image.BICUBIC)
        upscaled_tile = unsqueeze_tile(upscaled_tile, original_image_slice)
        upscaled_tile = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4), Image.BICUBIC)
        remove_borders = []
        if x == 0:
            remove_borders.append("l")
        elif crop_rect[2] == image.size[0]:
            remove_borders.append("r")
        if y == 0:
            remove_borders.append("t")
        elif crop_rect[3] == image.size[1]:
            remove_borders.append("b")
        mask = Image.fromarray(
            make_transparency_mask(
                (upscaled_tile.size[0], upscaled_tile.size[1]), tile_border * 4, remove_borders=remove_borders), mode="L", )
        final_image.paste(
            upscaled_tile, (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4), mask)
@torch.no_grad()
    def __call__(self, prompt, image, num_inference_steps=75, guidance_scale=9.0, noise_level=50, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, callback=None, callback_steps=1, tile_size=128, tile_border=32, original_image_slice=32, ):
'''simple docstring'''
        final_image = Image.new("RGB", (image.size[0] * 4, image.size[1] * 4))
        tcx = math.ceil(image.size[0] / tile_size)
        tcy = math.ceil(image.size[1] / tile_size)
        total_tile_count = tcx * tcy
        current_count = 0
        for y in range(tcy):
            for x in range(tcx):
                self._process_tile(
                    original_image_slice, x, y, tile_size, tile_border, image, final_image, prompt=prompt, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, noise_level=noise_level, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, )
current_count += 1
if callback is not None:
callback({"""progress""": current_count / total_tile_count, """image""": final_image} )
return final_image
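# Worked example of the loop above: a 512x512 input with the default tile_size=128
# gives tcx = tcy = 4, so 16 tiles are upscaled independently (each 4x) and pasted
# into the 2048x2048 canvas, with tile_border-wide overlaps blended by the mask.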
def main():
    # Run a demo
    model_id = "stabilityai/stable-diffusion-x4-upscaler"
    pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(model_id, revision="fp16", torch_dtype=torch.float16)
    pipe = pipe.to("cuda")
    image = Image.open("../../docs/source/imgs/diffusers_library.jpg")

    def callback(obj):
        print(f"progress: {obj['progress']:.4f}")
        obj["image"].save("diffusers_library_progress.jpg")

    final_image = pipe(image=image, prompt="Black font, white background, vector", noise_level=40, callback=callback)
    final_image.save("diffusers_library.jpg")
if __name__ == "__main__":
main()
| 76
| 1
|
from collections.abc import Callable
import numpy as np
def explicit_euler(ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float) -> np.ndarray:
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
x += step_size
return y
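# Usage sketch for the explicit Euler integrator above (values illustrative):
# for y' = y with y(0) = 1, step_size = 0.01 and x_end = 5, the method takes 500
# steps and returns y[-1] = (1.01)**500 ~= 144.77, undershooting the exact solution
# e**5 ~= 148.41 by the O(step_size) global error expected of Euler's method.
#   y = explicit_euler(lambda x, y: y, 1.0, 0.0, 0.01, 5.0)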
if __name__ == "__main__":
import doctest
doctest.testmod()
| 19
|
"""simple docstring"""
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class TFEfficientFormerModelTester:
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 13 , SCREAMING_SNAKE_CASE = 64 , SCREAMING_SNAKE_CASE = 2 , SCREAMING_SNAKE_CASE = 3 , SCREAMING_SNAKE_CASE = 3 , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = 128 , SCREAMING_SNAKE_CASE=[16, 32, 64, 128] , SCREAMING_SNAKE_CASE = 7 , SCREAMING_SNAKE_CASE = 4 , SCREAMING_SNAKE_CASE = 37 , SCREAMING_SNAKE_CASE = "gelu" , SCREAMING_SNAKE_CASE = 0.1 , SCREAMING_SNAKE_CASE = 0.1 , SCREAMING_SNAKE_CASE = 10 , SCREAMING_SNAKE_CASE = 0.02 , SCREAMING_SNAKE_CASE = 2 , SCREAMING_SNAKE_CASE = 1 , SCREAMING_SNAKE_CASE = 128 , SCREAMING_SNAKE_CASE = [2, 2, 2, 2] , SCREAMING_SNAKE_CASE = 2 , SCREAMING_SNAKE_CASE = 2 , ):
"""simple docstring"""
snake_case : int = parent
snake_case : List[Any] = batch_size
snake_case : List[str] = image_size
snake_case : int = patch_size
snake_case : int = num_channels
snake_case : Any = is_training
snake_case : int = use_labels
snake_case : Optional[Any] = hidden_size
snake_case : str = num_hidden_layers
snake_case : Optional[int] = num_attention_heads
snake_case : Union[str, Any] = intermediate_size
snake_case : Dict = hidden_act
snake_case : Any = hidden_dropout_prob
snake_case : Optional[Any] = attention_probs_dropout_prob
snake_case : List[Any] = type_sequence_label_size
snake_case : Optional[Any] = initializer_range
snake_case : Any = encoder_stride
snake_case : Tuple = num_attention_outputs
snake_case : Dict = embed_dim
snake_case : Optional[Any] = embed_dim + 1
snake_case : Any = resolution
snake_case : int = depths
snake_case : int = hidden_sizes
snake_case : int = dim
snake_case : Tuple = mlp_expansion_ratio
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case : Optional[int] = None
if self.use_labels:
snake_case : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case : str = self.get_config()
return config, pixel_values, labels
def lowerCamelCase_ ( self ):
"""simple docstring"""
return EfficientFormerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
snake_case : str = TFEfficientFormerModel(config=SCREAMING_SNAKE_CASE )
snake_case : Optional[Any] = model(SCREAMING_SNAKE_CASE , training=SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
snake_case : Optional[int] = self.type_sequence_label_size
snake_case : Tuple = TFEfficientFormerForImageClassification(SCREAMING_SNAKE_CASE )
snake_case : List[Any] = model(SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE , training=SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
snake_case : Tuple = 1
snake_case : Any = TFEfficientFormerForImageClassification(SCREAMING_SNAKE_CASE )
snake_case : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case : Union[str, Any] = model(SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCamelCase_ ( self ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class TFEfficientFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": TFEfficientFormerModel,
"""image-classification""": (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        """simple docstring"""
        self.model_tester = TFEfficientFormerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=EfficientFormerConfig, has_text_modality=False, hidden_size=37)
def lowerCamelCase_ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="EfficientFormer does not use inputs_embeds" )
def lowerCamelCase_ ( self ):
"""simple docstring"""
pass
@unittest.skip(reason="EfficientFormer does not support input and output embeddings" )
def lowerCamelCase_ ( self ):
"""simple docstring"""
pass
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case , snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case : str = model_class(SCREAMING_SNAKE_CASE )
snake_case : Dict = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case : Optional[int] = [*signature.parameters.keys()]
snake_case : int = ["pixel_values"]
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE )
def lowerCamelCase_ ( self ):
"""simple docstring"""
def check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
snake_case : List[str] = model_class(SCREAMING_SNAKE_CASE )
snake_case : Any = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , training=SCREAMING_SNAKE_CASE )
snake_case : Optional[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
snake_case : List[Any] = getattr(
self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
if hasattr(self.model_tester , "encoder_seq_length" ):
snake_case : List[Any] = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , "chunk_length" ) and self.model_tester.chunk_length > 1:
snake_case : Optional[int] = seq_length * self.model_tester.chunk_length
else:
snake_case : List[Any] = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
snake_case : List[Any] = outputs.decoder_hidden_states
                self.assertIsInstance(hidden_states, (list, tuple))
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
snake_case : Tuple = getattr(self.model_tester , "seq_length" , SCREAMING_SNAKE_CASE )
snake_case : Tuple = getattr(self.model_tester , "decoder_seq_length" , SCREAMING_SNAKE_CASE )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
snake_case , snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case : List[Any] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case : Optional[int] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ):
"""simple docstring"""
snake_case : str = super()._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE )
@unittest.skip(reason="EfficientFormer does not implement masked image modeling yet" )
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*SCREAMING_SNAKE_CASE )
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE )
@slow
def lowerCamelCase_ ( self ):
"""simple docstring"""
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case : str = TFEfficientFormerModel.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case , snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case : str = True
snake_case : Tuple = getattr(self.model_tester , "seq_length" , SCREAMING_SNAKE_CASE )
snake_case : Optional[int] = getattr(self.model_tester , "encoder_seq_length" , SCREAMING_SNAKE_CASE )
snake_case : Optional[Any] = getattr(self.model_tester , "key_length" , SCREAMING_SNAKE_CASE )
snake_case : Tuple = getattr(self.model_tester , "chunk_length" , SCREAMING_SNAKE_CASE )
if chunk_length is not None and hasattr(self.model_tester , "num_hashes" ):
snake_case : Optional[int] = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
snake_case : Optional[int] = True
snake_case : List[Any] = False
snake_case : Optional[int] = True
snake_case : List[str] = model_class(SCREAMING_SNAKE_CASE )
snake_case : List[str] = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , training=SCREAMING_SNAKE_CASE )
snake_case : int = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
snake_case : Tuple = True
snake_case : List[str] = model_class(SCREAMING_SNAKE_CASE )
snake_case : int = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , training=SCREAMING_SNAKE_CASE )
snake_case : Any = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case , snake_case : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
snake_case : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
snake_case : Optional[Any] = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=SCREAMING_SNAKE_CASE )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
snake_case : Any = model(SCREAMING_SNAKE_CASE )
self.assertTrue(outputs_dict is not None )
def UpperCamelCase__ ( ):
snake_case : Any = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class lowerCamelCase__ ( unittest.TestCase ):
@cached_property
def lowerCamelCase_ ( self ):
"""simple docstring"""
return (
EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300" )
if is_vision_available()
else None
)
@slow
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : str = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300" )
snake_case : List[Any] = self.default_image_processor
snake_case : Optional[Any] = prepare_img()
snake_case : int = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors="tf" )
# forward pass
snake_case : Union[str, Any] = model(**SCREAMING_SNAKE_CASE , training=SCREAMING_SNAKE_CASE )
# verify the logits
snake_case : int = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE )
snake_case : Dict = tf.constant([-0.05_55, 0.48_25, -0.08_52] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 ) )
@slow
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : Optional[int] = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
"snap-research/efficientformer-l1-300" )
snake_case : int = self.default_image_processor
snake_case : List[Any] = prepare_img()
snake_case : Optional[Any] = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors="tf" )
# forward pass
snake_case : Any = model(**SCREAMING_SNAKE_CASE , training=SCREAMING_SNAKE_CASE )
# verify the logits
snake_case : Any = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE )
snake_case : Optional[int] = tf.constant([-0.13_12, 0.43_53, -1.04_99] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 ) )
| 148
| 0
|
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False
def __A ( self ) -> int:
'''simple docstring'''
super().setUp()
# fmt: off
        vocab = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
# fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
def __A ( self , **A ) -> Optional[int]:
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_A )
def __A ( self , A ) -> Optional[int]:
'''simple docstring'''
        input_text = 'tester'
        output_text = 'tester'
        return input_text, output_text
@unittest.skip("""MGP-STR always lower cases letters.""" )
def __A ( self ) -> List[Any]:
'''simple docstring'''
pass
def __A ( self ) -> Any:
'''simple docstring'''
lowerCamelCase = self.get_tokenizers(do_lower_case=_A )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
lowerCamelCase = '[SPECIAL_TOKEN]'
tokenizer.add_special_tokens({"""cls_token""": special_token} )
lowerCamelCase = tokenizer.encode([special_token] , add_special_tokens=_A )
self.assertEqual(len(_A ) , 1 )
lowerCamelCase = tokenizer.decode(_A , skip_special_tokens=_A )
self.assertTrue(special_token not in decoded )
def __A ( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
lowerCamelCase = self.get_input_output_texts(_A )
lowerCamelCase = tokenizer.tokenize(_A )
lowerCamelCase = tokenizer.convert_tokens_to_ids(_A )
lowerCamelCase = tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
lowerCamelCase = tokenizer.convert_ids_to_tokens(_A )
self.assertNotEqual(len(_A ) , 0 )
lowerCamelCase = tokenizer.decode(_A )
self.assertIsInstance(_A , _A )
self.assertEqual(text_a.replace(""" """ , """""" ) , _A )
@unittest.skip("""MGP-STR tokenizer only handles one sequence.""" )
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip("""inputs cannot be pretokenized in MgpstrTokenizer""" )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
pass
| 356
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class AlignProcessorTest(unittest.TestCase):
"""simple docstring"""
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
        self.tmpdirname = tempfile.mkdtemp()
        vocab_tokens = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
        image_processor_map = {
"""do_resize""": True,
"""size""": 20,
"""do_center_crop""": True,
"""crop_size""": 18,
"""do_normalize""": True,
"""image_mean""": [0.48145466, 0.4578275, 0.40821073],
"""image_std""": [0.26862954, 0.26130258, 0.27577711],
}
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
            json.dump(image_processor_map, fp)
def __A ( self , **A ) -> Optional[Any]:
'''simple docstring'''
return BertTokenizer.from_pretrained(self.tmpdirname , **A )
def __A ( self , **A ) -> List[Any]:
'''simple docstring'''
return BertTokenizerFast.from_pretrained(self.tmpdirname , **A )
def __A ( self , **A ) -> Optional[int]:
'''simple docstring'''
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **A )
def __A ( self ) -> List[str]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def __A ( self ) -> Any:
'''simple docstring'''
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
def __A ( self ) -> Any:
'''simple docstring'''
lowerCamelCase = self.get_tokenizer()
lowerCamelCase = self.get_rust_tokenizer()
lowerCamelCase = self.get_image_processor()
lowerCamelCase = AlignProcessor(tokenizer=A , image_processor=A )
processor_slow.save_pretrained(self.tmpdirname )
lowerCamelCase = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=A )
lowerCamelCase = AlignProcessor(tokenizer=A , image_processor=A )
processor_fast.save_pretrained(self.tmpdirname )
lowerCamelCase = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , A )
self.assertIsInstance(processor_fast.tokenizer , A )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , A )
self.assertIsInstance(processor_fast.image_processor , A )
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
lowerCamelCase = self.get_image_processor(do_normalize=A , padding_value=1.0 )
lowerCamelCase = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=A , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , A )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , A )
def __A ( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase = self.get_image_processor()
lowerCamelCase = self.get_tokenizer()
lowerCamelCase = AlignProcessor(tokenizer=A , image_processor=A )
lowerCamelCase = self.prepare_image_inputs()
lowerCamelCase = image_processor(A , return_tensors="""np""" )
lowerCamelCase = processor(images=A , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase = self.get_image_processor()
lowerCamelCase = self.get_tokenizer()
lowerCamelCase = AlignProcessor(tokenizer=A , image_processor=A )
lowerCamelCase = """lower newer"""
lowerCamelCase = processor(text=A )
lowerCamelCase = tokenizer(A , padding="""max_length""" , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __A ( self ) -> Tuple:
'''simple docstring'''
lowerCamelCase = self.get_image_processor()
lowerCamelCase = self.get_tokenizer()
lowerCamelCase = AlignProcessor(tokenizer=A , image_processor=A )
lowerCamelCase = """lower newer"""
lowerCamelCase = self.prepare_image_inputs()
lowerCamelCase = processor(text=A , images=A )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(A ):
processor()
def __A ( self ) -> int:
'''simple docstring'''
lowerCamelCase = self.get_image_processor()
lowerCamelCase = self.get_tokenizer()
lowerCamelCase = AlignProcessor(tokenizer=A , image_processor=A )
lowerCamelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCamelCase = processor.batch_decode(A )
lowerCamelCase = tokenizer.batch_decode(A )
self.assertListEqual(A , A )
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase = self.get_image_processor()
lowerCamelCase = self.get_tokenizer()
lowerCamelCase = AlignProcessor(tokenizer=A , image_processor=A )
lowerCamelCase = """lower newer"""
lowerCamelCase = self.prepare_image_inputs()
lowerCamelCase = processor(text=A , images=A )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 66
| 0
|
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class DistilBertModelTester(object):
"""simple docstring"""
def __init__( self : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any]=1_3 , UpperCamelCase__ : Dict=7 , UpperCamelCase__ : int=True , UpperCamelCase__ : Optional[Any]=True , UpperCamelCase__ : Dict=False , UpperCamelCase__ : int=True , UpperCamelCase__ : List[str]=9_9 , UpperCamelCase__ : Optional[int]=3_2 , UpperCamelCase__ : int=5 , UpperCamelCase__ : str=4 , UpperCamelCase__ : int=3_7 , UpperCamelCase__ : Optional[int]="gelu" , UpperCamelCase__ : str=0.1 , UpperCamelCase__ : Tuple=0.1 , UpperCamelCase__ : int=5_1_2 , UpperCamelCase__ : List[Any]=1_6 , UpperCamelCase__ : List[Any]=2 , UpperCamelCase__ : Optional[Any]=0.0_2 , UpperCamelCase__ : str=3 , UpperCamelCase__ : int=4 , UpperCamelCase__ : Optional[int]=None , ):
"""simple docstring"""
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = seq_length
UpperCamelCase = is_training
UpperCamelCase = use_input_mask
UpperCamelCase = use_token_type_ids
UpperCamelCase = use_labels
UpperCamelCase = vocab_size
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = max_position_embeddings
UpperCamelCase = type_vocab_size
UpperCamelCase = type_sequence_label_size
UpperCamelCase = initializer_range
UpperCamelCase = num_labels
UpperCamelCase = num_choices
UpperCamelCase = scope
    def prepare_config_and_inputs(self):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
"""simple docstring"""
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
    def create_and_check_distilbert_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        model = DistilBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_distilbert_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        model = DistilBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_distilbert_for_question_answering(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        model = DistilBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_distilbert_for_sequence_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = DistilBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_distilbert_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = DistilBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_distilbert_for_multiple_choice(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        config.num_choices = self.num_choices
        model = DistilBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class DistilBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
    all_model_classes = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
    pipeline_model_mapping = (
{
"""feature-extraction""": DistilBertModel,
"""fill-mask""": DistilBertForMaskedLM,
"""question-answering""": DistilBertForQuestionAnswering,
"""text-classification""": DistilBertForSequenceClassification,
"""token-classification""": DistilBertForTokenClassification,
"""zero-shot""": DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    test_pruning = True
    test_resize_embeddings = True
    test_resize_position_embeddings = True
    def setUp(self):
        """simple docstring"""
        self.model_tester = DistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)
    def test_config(self):
"""simple docstring"""
self.config_tester.run_common_tests()
    def test_distilbert_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)
    def test_distilbert_for_masked_lm(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)
    def test_distilbert_for_question_answering(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)
    def test_distilbert_for_sequence_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)
    def test_distilbert_for_token_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)
    def test_distilbert_for_multiple_choice(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@slow
@require_torch_gpu
    def test_torchscript_device_change(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # BertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return
            config.torchscript = True
            model = model_class(config=config)
            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict['input_ids'].to('cpu'), inputs_dict['attention_mask'].to('cpu')))
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, 'traced_model.pt'))
                loaded = torch.jit.load(os.path.join(tmp, 'traced_model.pt'), map_location=torch_device)
                loaded(inputs_dict['input_ids'].to(torch_device), inputs_dict['attention_mask'].to(torch_device))
@require_torch
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
@slow
    def test_inference_no_head_absolute_embedding(self):
        """simple docstring"""
        model = DistilBertModel.from_pretrained('distilbert-base-uncased')
        input_ids = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 1_1, 7_6_8))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.1_6_3_9, 0.3_2_9_9, 0.1_6_4_8], [-0.1_7_4_6, 0.3_2_8_9, 0.1_7_1_0], [-0.1_8_8_4, 0.3_3_5_7, 0.1_8_1_0]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1E-4))
| 28
|
"""simple docstring"""
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
SCHEDULER_CONFIG_NAME = "scheduler_config.json"
class FlaxKarrasDiffusionSchedulers(Enum):
    """simple docstring"""
    FlaxDDIMScheduler = 1
    FlaxDDPMScheduler = 2
    FlaxPNDMScheduler = 3
    FlaxLMSDiscreteScheduler = 4
    FlaxDPMSolverMultistepScheduler = 5
@dataclass
class FlaxSchedulerOutput(BaseOutput):
    """simple docstring"""
    prev_sample: jnp.ndarray
class FlaxSchedulerMixin:
    """simple docstring"""
    config_name = SCHEDULER_CONFIG_NAME
    ignore_for_config = ["dtype"]
    _compatibles = []
    has_compatibles = True
@classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Dict[str, Any] = None, subfolder: Optional[str] = None, return_unused_kwargs=False, **kwargs, ):
        config, kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path, subfolder=subfolder, return_unused_kwargs=True, **kwargs, )
        scheduler, unused_kwargs = cls.from_config(config, return_unused_kwargs=True, **kwargs)
        if hasattr(scheduler, "create_state") and getattr(scheduler, "has_state", False):
            state = scheduler.create_state()
if return_unused_kwargs:
return scheduler, state, unused_kwargs
return scheduler, state
    def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)
@property
    def compatibles(self):
        return self._get_compatibles()
@classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
def broadcast_to_shape_from_left(x: jnp.ndarray, shape: Tuple[int]) -> jnp.ndarray:
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, dtype=jnp.float32):
    def alpha_bar(time_step):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return jnp.array(betas, dtype=dtype)
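# Sanity check of betas_for_alpha_bar: with alpha_bar(t) = cos((t + 0.008)/1.008 * pi/2)**2,
# each beta_i = 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), capped at max_beta. This is the
# Glide/"squaredcos_cap_v2" cosine schedule consumed by the "squaredcos_cap_v2" branch below.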
@flax.struct.dataclass
class CommonSchedulerState:
    """simple docstring"""
    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray
@classmethod
    def create(cls, scheduler):
        config = scheduler.config
        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype)
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                F'beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}')
        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas, axis=0)
        return cls(
            alphas=alphas, betas=betas, alphas_cumprod=alphas_cumprod, )
def get_sqrt_alpha_prod(state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    alphas_cumprod = state.alphas_cumprod
    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)
    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)
    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def add_noise_common(state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples
def get_velocity_common(state: CommonSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
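# The two helpers above are the components of a rotation: with a = alphas_cumprod[t],
#   noisy = sqrt(a) * sample + sqrt(1 - a) * noise      (add_noise_common)
#   v     = sqrt(a) * noise  - sqrt(1 - a) * sample     (get_velocity_common)
# so (sample, noise) can be recovered exactly from (noisy, v).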
| 106
| 0
|
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'''
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-ctx_encoder-single-nq-base''': 5_1_2,
'''facebook/dpr-ctx_encoder-multiset-base''': 5_1_2,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-question_encoder-single-nq-base''': 5_1_2,
'''facebook/dpr-question_encoder-multiset-base''': 5_1_2,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-reader-single-nq-base''': 5_1_2,
'''facebook/dpr-reader-multiset-base''': 5_1_2,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer
class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
'''DPRSpanPrediction''', ['''span_score''', '''relevance_score''', '''doc_id''', '''start_index''', '''end_index''', '''text''']
)
DPRReaderOutput = collections.namedtuple('''DPRReaderOutput''', ['''start_logits''', '''end_logits''', '''relevance_logits'''])
CUSTOM_DPR_READER_DOCSTRING = R'''
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `\'tf\'`: Return TensorFlow `tf.constant` objects.
- `\'pt\'`: Return PyTorch `torch.Tensor` objects.
- `\'np\'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer\'s default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
'''
@add_start_docstrings(a_ )
class UpperCamelCase_ :
def __call__( self , snake_case__ , snake_case__ = None , snake_case__ = None , snake_case__ = False , snake_case__ = False , snake_case__ = None , snake_case__ = None , snake_case__ = None , **snake_case__ , ) -> BatchEncoding:
"""simple docstring"""
if titles is None and texts is None:
return super().__call__(
snake_case__ , padding=snake_case__ , truncation=snake_case__ , max_length=snake_case__ , return_tensors=snake_case__ , return_attention_mask=snake_case__ , **snake_case__ , )
elif titles is None or texts is None:
UpperCAmelCase = titles if texts is None else texts
return super().__call__(
snake_case__ , snake_case__ , padding=snake_case__ , truncation=snake_case__ , max_length=snake_case__ , return_tensors=snake_case__ , return_attention_mask=snake_case__ , **snake_case__ , )
UpperCAmelCase = titles if not isinstance(snake_case__ , snake_case__ ) else [titles]
UpperCAmelCase = texts if not isinstance(snake_case__ , snake_case__ ) else [texts]
UpperCAmelCase = len(snake_case__ )
UpperCAmelCase = questions if not isinstance(snake_case__ , snake_case__ ) else [questions] * n_passages
assert len(snake_case__ ) == len(
snake_case__ ), f'''There should be as many titles as texts, but got {len(snake_case__ )} titles and {len(snake_case__ )} texts.'''
UpperCAmelCase = super().__call__(snake_case__ , snake_case__ , padding=snake_case__ , truncation=snake_case__ )["""input_ids"""]
UpperCAmelCase = super().__call__(snake_case__ , add_special_tokens=snake_case__ , padding=snake_case__ , truncation=snake_case__ )["""input_ids"""]
UpperCAmelCase = {
"""input_ids""": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(snake_case__ , snake_case__ )
]
}
if return_attention_mask is not False:
UpperCAmelCase = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
UpperCAmelCase = attention_mask
return self.pad(snake_case__ , padding=snake_case__ , max_length=snake_case__ , return_tensors=snake_case__ )
def UpperCamelCase_ ( self , snake_case__ , snake_case__ , snake_case__ = 16 , snake_case__ = 64 , snake_case__ = 4 , ) -> List[DPRSpanPrediction]:
"""simple docstring"""
UpperCAmelCase = reader_input["""input_ids"""]
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = reader_output[:3]
UpperCAmelCase = len(snake_case__ )
UpperCAmelCase = sorted(range(snake_case__ ) , reverse=snake_case__ , key=relevance_logits.__getitem__ )
UpperCAmelCase = []
for doc_id in sorted_docs:
UpperCAmelCase = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
UpperCAmelCase = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
UpperCAmelCase = sequence_ids.index(self.pad_token_id )
else:
UpperCAmelCase = len(snake_case__ )
UpperCAmelCase = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=snake_case__ , top_spans=snake_case__ , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=snake_case__ , start_index=snake_case__ , end_index=snake_case__ , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(snake_case__ ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def UpperCamelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ) -> List[DPRSpanPrediction]:
"""simple docstring"""
UpperCAmelCase = []
for start_index, start_score in enumerate(snake_case__ ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
UpperCAmelCase = sorted(snake_case__ , key=lambda x : x[1] , reverse=snake_case__ )
UpperCAmelCase = []
for (start_index, end_index), score in scores:
assert start_index <= end_index, f'''Wrong span indices: [{start_index}:{end_index}]'''
UpperCAmelCase = end_index - start_index + 1
assert length <= max_answer_length, f'''Span is too long: {length} > {max_answer_length}'''
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(snake_case__ ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(a_ )
class UpperCamelCase_ ( a_ , a_ ):
_A : int = VOCAB_FILES_NAMES
_A : Dict = READER_PRETRAINED_VOCAB_FILES_MAP
_A : int = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A : Optional[int] = READER_PRETRAINED_INIT_CONFIGURATION
_A : Dict = ['input_ids', 'attention_mask']
_A : Optional[Any] = DPRReaderTokenizer
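# Illustrative usage sketch (added; not part of the original file). It assumes
# the public `DPRReader`/`DPRReaderTokenizer` classes from `transformers` and
# the `facebook/dpr-reader-single-nq-base` checkpoint.
if __name__ == "__main__":
    from transformers import DPRReader, DPRReaderTokenizer

    tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
    model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
    # Encode one question against one (title, text) passage; `input_ids` has
    # shape (n_passages, sequence_length), as described in the docstring above.
    encoded_inputs = tokenizer(
        questions=["What is love?"],
        titles=["Haddaway"],
        texts=["'What Is Love' is a song recorded by the artist Haddaway"],
        return_tensors="pt",
    )
    outputs = model(**encoded_inputs)
    # Rank passages by relevance logit and extract the best answer spans.
    predicted_spans = tokenizer.decode_best_spans(encoded_inputs, outputs)
    print(predicted_spans[0].text)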
| 371
|
"""simple docstring"""
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def _lowerCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
return (data["data"], data["target"])
def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase = XGBRegressor(verbosity=0 , random_state=42 )
xgb.fit(lowerCAmelCase , lowerCAmelCase )
# Predict target for test data
UpperCAmelCase = xgb.predict(lowerCAmelCase )
UpperCAmelCase = predictions.reshape(len(lowerCAmelCase ) , 1 )
return predictions
def _lowerCAmelCase ( ):
'''simple docstring'''
UpperCAmelCase = fetch_california_housing()
UpperCAmelCase , UpperCAmelCase = data_handling(lowerCAmelCase )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = train_test_split(
lowerCAmelCase , lowerCAmelCase , test_size=0.25 , random_state=1 )
UpperCAmelCase = xgboost(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# Error printing
print(F'''Mean Absolute Error : {mean_absolute_error(lowerCAmelCase , lowerCAmelCase )}''' )
print(F'''Mean Squared Error : {mean_squared_error(lowerCAmelCase , lowerCAmelCase )}''' )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
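# Optional extension (illustrative, not part of the original script): RMSE is
# often reported alongside MAE/MSE because it is expressed in the target's own
# units; with the imports above it is a one-liner:
#
#     rmse = np.sqrt(mean_squared_error(y_test, predictions))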
| 248
| 0
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class lowercase__ ( _snake_case , _snake_case , _snake_case , unittest.TestCase ):
'''simple docstring'''
A_ : Tuple = StableUnCLIPImgaImgPipeline
A_ : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
A_ : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
A_ : Tuple = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
A_ : List[Any] = frozenset([] )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : List[str] = 32
_SCREAMING_SNAKE_CASE : Tuple = embedder_hidden_size
# image encoding components
_SCREAMING_SNAKE_CASE : int = CLIPImageProcessor(crop_size=32 , size=32 )
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE : Tuple = CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=__snake_case , projection_dim=__snake_case , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
# regular denoising components
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE : str = StableUnCLIPImageNormalizer(embedding_dim=__snake_case )
_SCREAMING_SNAKE_CASE : Union[str, Any] = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" )
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE : Optional[int] = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=__snake_case , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE : int = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="""projection""" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=__snake_case , layers_per_block=1 , upcast_attention=__snake_case , use_linear_projection=__snake_case , )
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE : Tuple = DDIMScheduler(
beta_schedule="""scaled_linear""" , beta_start=0.00085 , beta_end=0.012 , prediction_type="""v_prediction""" , set_alpha_to_one=__snake_case , steps_offset=1 , )
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE : int = AutoencoderKL()
_SCREAMING_SNAKE_CASE : Dict = {
# image encoding components
"""feature_extractor""": feature_extractor,
"""image_encoder""": image_encoder.eval(),
# image noising components
"""image_normalizer""": image_normalizer.eval(),
"""image_noising_scheduler""": image_noising_scheduler,
# regular denoising components
"""tokenizer""": tokenizer,
"""text_encoder""": text_encoder.eval(),
"""unet""": unet.eval(),
"""scheduler""": scheduler,
"""vae""": vae.eval(),
}
return components
def UpperCAmelCase_ ( self , __snake_case , __snake_case=0 , __snake_case=True ):
if str(__snake_case ).startswith("""mps""" ):
_SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(__snake_case )
else:
_SCREAMING_SNAKE_CASE : int = torch.Generator(device=__snake_case ).manual_seed(__snake_case )
_SCREAMING_SNAKE_CASE : Union[str, Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__snake_case ) ).to(__snake_case )
if pil_image:
_SCREAMING_SNAKE_CASE : Optional[Any] = input_image * 0.5 + 0.5
_SCREAMING_SNAKE_CASE : Union[str, Any] = input_image.clamp(0 , 1 )
_SCREAMING_SNAKE_CASE : Union[str, Any] = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
_SCREAMING_SNAKE_CASE : List[Any] = DiffusionPipeline.numpy_to_pil(__snake_case )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : List[str] = """cpu""" # ensure determinism for the device-dependent torch.Generator
_SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_components()
_SCREAMING_SNAKE_CASE : Dict = StableUnCLIPImgaImgPipeline(**__snake_case )
_SCREAMING_SNAKE_CASE : Dict = sd_pipe.to(__snake_case )
sd_pipe.set_progress_bar_config(disable=__snake_case )
_SCREAMING_SNAKE_CASE : str = self.get_dummy_inputs(__snake_case )
inputs.update({"""image_embeds""": None} )
_SCREAMING_SNAKE_CASE : Optional[int] = sd_pipe(**__snake_case ).images
_SCREAMING_SNAKE_CASE : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_SCREAMING_SNAKE_CASE : Dict = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : Tuple = torch_device in ["""cpu""", """mps"""]
self._test_attention_slicing_forward_pass(test_max_difference=__snake_case )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = torch_device in ["""cpu""", """mps"""]
self._test_inference_batch_single_identical(test_max_difference=__snake_case )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def UpperCAmelCase_ ( self ):
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=__snake_case )
@slow
@require_torch_gpu
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : str = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png""" )
_SCREAMING_SNAKE_CASE : List[str] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy""" )
_SCREAMING_SNAKE_CASE : Tuple = StableUnCLIPImgaImgPipeline.from_pretrained(
"""fusing/stable-unclip-2-1-l-img2img""" , torch_dtype=torch.floataa )
pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_SCREAMING_SNAKE_CASE : str = torch.Generator(device="""cpu""" ).manual_seed(0 )
_SCREAMING_SNAKE_CASE : Union[str, Any] = pipe(__snake_case , """anime turle""" , generator=__snake_case , output_type="""np""" )
_SCREAMING_SNAKE_CASE : Union[str, Any] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(__snake_case , __snake_case )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png""" )
_SCREAMING_SNAKE_CASE : Union[str, Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy""" )
_SCREAMING_SNAKE_CASE : int = StableUnCLIPImgaImgPipeline.from_pretrained(
"""fusing/stable-unclip-2-1-h-img2img""" , torch_dtype=torch.floataa )
pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_SCREAMING_SNAKE_CASE : Tuple = torch.Generator(device="""cpu""" ).manual_seed(0 )
_SCREAMING_SNAKE_CASE : Any = pipe(__snake_case , """anime turle""" , generator=__snake_case , output_type="""np""" )
_SCREAMING_SNAKE_CASE : List[str] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(__snake_case , __snake_case )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png""" )
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_SCREAMING_SNAKE_CASE : str = StableUnCLIPImgaImgPipeline.from_pretrained(
"""fusing/stable-unclip-2-1-h-img2img""" , torch_dtype=torch.floataa )
_SCREAMING_SNAKE_CASE : int = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_SCREAMING_SNAKE_CASE : List[Any] = pipe(
__snake_case , """anime turtle""" , num_inference_steps=2 , output_type="""np""" , )
_SCREAMING_SNAKE_CASE : Dict = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
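# Note (added for illustration): the memory savings used in the slow tests
# above follow a generic diffusers pattern that applies to most pipelines; a
# minimal sketch, assuming `pipe` is any loaded `DiffusionPipeline`:
#
#     pipe.enable_attention_slicing()       # compute attention in slices to cut peak VRAM
#     pipe.enable_sequential_cpu_offload()  # keep module weights on CPU until each forward pass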
| 200
|
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowercase__ ( _snake_case ):
'''simple docstring'''
A_ : UNetaDModel
A_ : ScoreSdeVeScheduler
def __init__( self , __snake_case , __snake_case ):
super().__init__()
self.register_modules(unet=__snake_case , scheduler=__snake_case )
@torch.no_grad()
def __call__( self , __snake_case = 1 , __snake_case = 2000 , __snake_case = None , __snake_case = "pil" , __snake_case = True , **__snake_case , ):
_SCREAMING_SNAKE_CASE : Optional[int] = self.unet.config.sample_size
_SCREAMING_SNAKE_CASE : Optional[Any] = (batch_size, 3, img_size, img_size)
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.unet
_SCREAMING_SNAKE_CASE : Optional[int] = randn_tensor(__snake_case , generator=__snake_case ) * self.scheduler.init_noise_sigma
_SCREAMING_SNAKE_CASE : Any = sample.to(self.device )
self.scheduler.set_timesteps(__snake_case )
self.scheduler.set_sigmas(__snake_case )
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
_SCREAMING_SNAKE_CASE : List[Any] = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
# correction step
for _ in range(self.scheduler.config.correct_steps ):
_SCREAMING_SNAKE_CASE : Any = self.unet(__snake_case , __snake_case ).sample
_SCREAMING_SNAKE_CASE : Any = self.scheduler.step_correct(__snake_case , __snake_case , generator=__snake_case ).prev_sample
# prediction step
_SCREAMING_SNAKE_CASE : Union[str, Any] = model(__snake_case , __snake_case ).sample
_SCREAMING_SNAKE_CASE : Tuple = self.scheduler.step_pred(__snake_case , __snake_case , __snake_case , generator=__snake_case )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : int = output.prev_sample, output.prev_sample_mean
_SCREAMING_SNAKE_CASE : Tuple = sample_mean.clamp(0 , 1 )
_SCREAMING_SNAKE_CASE : str = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_SCREAMING_SNAKE_CASE : Optional[int] = self.numpy_to_pil(__snake_case )
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=__snake_case )
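# Illustrative usage sketch (added; not part of the original file). It assumes
# the public `ScoreSdeVePipeline` name from `diffusers` and the
# `google/ncsnpp-church-256` checkpoint.
if __name__ == "__main__":
    from diffusers import ScoreSdeVePipeline

    sde_ve = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256")
    # Each of the `num_inference_steps` predictor steps is preceded by
    # `scheduler.config.correct_steps` corrector steps, as implemented above.
    image = sde_ve(num_inference_steps=2000).images[0]
    image.save("sde_ve_sample.png")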
| 200
| 1
|
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
lowerCAmelCase : Any = logging.get_logger(__name__)
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = "linear"
__magic_name__ = "cosine"
__magic_name__ = "cosine_with_restarts"
__magic_name__ = "polynomial"
__magic_name__ = "constant"
__magic_name__ = "constant_with_warmup"
__magic_name__ = "piecewise_constant"
def lowercase (_A , _A = -1 ):
"""simple docstring"""
return LambdaLR(_A , lambda _A : 1 , last_epoch=_A )
def lowercase (_A , _A , _A = -1 ):
"""simple docstring"""
def lr_lambda(_A ):
if current_step < num_warmup_steps:
return float(_A ) / float(max(1.0 , _A ) )
return 1.0
return LambdaLR(_A , _A , last_epoch=_A )
def lowercase (_A , _A , _A = -1 ):
"""simple docstring"""
_lowerCAmelCase : Tuple = {}
_lowerCAmelCase : List[Any] = step_rules.split(',' )
for rule_str in rule_list[:-1]:
_lowerCAmelCase , _lowerCAmelCase : int = rule_str.split(':' )
_lowerCAmelCase : int = int(_A )
_lowerCAmelCase : str = float(_A )
_lowerCAmelCase : Optional[Any] = value
_lowerCAmelCase : List[Any] = float(rule_list[-1] )
def create_rules_function(_A , _A ):
def rule_func(_A ) -> float:
_lowerCAmelCase : List[Any] = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(_A ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
_lowerCAmelCase : Dict = create_rules_function(_A , _A )
return LambdaLR(_A , _A , last_epoch=_A )
def lowercase (_A , _A , _A , _A=-1 ):
"""simple docstring"""
def lr_lambda(_A ):
if current_step < num_warmup_steps:
return float(_A ) / float(max(1 , _A ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(_A , _A , _A )
def lowercase (_A , _A , _A , _A = 0.5 , _A = -1 ):
"""simple docstring"""
def lr_lambda(_A ):
if current_step < num_warmup_steps:
return float(_A ) / float(max(1 , _A ) )
_lowerCAmelCase : Optional[int] = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(_A ) * 2.0 * progress )) )
return LambdaLR(_A , _A , _A )
def lowercase (_A , _A , _A , _A = 1 , _A = -1 ):
"""simple docstring"""
def lr_lambda(_A ):
if current_step < num_warmup_steps:
return float(_A ) / float(max(1 , _A ) )
_lowerCAmelCase : int = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(_A ) * progress) % 1.0) )) )
return LambdaLR(_A , _A , _A )
def lowercase (_A , _A , _A , _A=1E-7 , _A=1.0 , _A=-1 ):
"""simple docstring"""
_lowerCAmelCase : Optional[int] = optimizer.defaults['lr']
if not (lr_init > lr_end):
raise ValueError(f'lr_end ({lr_end}) must be smaller than initial lr ({lr_init})' )
def lr_lambda(_A ):
if current_step < num_warmup_steps:
return float(_A ) / float(max(1 , _A ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
_lowerCAmelCase : List[str] = lr_init - lr_end
_lowerCAmelCase : Optional[int] = num_training_steps - num_warmup_steps
_lowerCAmelCase : Optional[Any] = 1 - (current_step - num_warmup_steps) / decay_steps
_lowerCAmelCase : Optional[int] = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(_A , _A , _A )
lowerCAmelCase : List[str] = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def lowercase (_A , _A , _A = None , _A = None , _A = None , _A = 1 , _A = 1.0 , _A = -1 , ):
"""simple docstring"""
_lowerCAmelCase : str = SchedulerType(_A )
_lowerCAmelCase : Optional[Any] = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(_A , last_epoch=_A )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(_A , step_rules=_A , last_epoch=_A )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(f'{name} requires `num_warmup_steps`, please provide that argument.' )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(_A , num_warmup_steps=_A , last_epoch=_A )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(f'{name} requires `num_training_steps`, please provide that argument.' )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
_A , num_warmup_steps=_A , num_training_steps=_A , num_cycles=_A , last_epoch=_A , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
_A , num_warmup_steps=_A , num_training_steps=_A , power=_A , last_epoch=_A , )
return schedule_func(
_A , num_warmup_steps=_A , num_training_steps=_A , last_epoch=_A )
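# Illustrative usage sketch (added; not part of the original file): wiring one
# of the schedules above to an optimizer via the dispatcher (publicly named
# `get_scheduler` in `diffusers.optimization`; it corresponds to the final
# `lowercase` definition here). The model below is a placeholder.
if __name__ == "__main__":
    import torch

    model = torch.nn.Linear(4, 2)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
    scheduler = get_scheduler(
        "cosine", optimizer, num_warmup_steps=10, num_training_steps=100
    )
    for _ in range(100):
        optimizer.step()   # update parameters first ...
        scheduler.step()   # ... then advance the learning-rate schedule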
| 25
|
'''simple docstring'''
from math import isqrt
def lowercase (_A ):
"""simple docstring"""
return all(number % divisor != 0 for divisor in range(2 , isqrt(_A ) + 1 ) )
def lowercase (_A = 1_0**6 ):
"""simple docstring"""
_lowerCAmelCase : str = 0
_lowerCAmelCase : str = 1
_lowerCAmelCase : List[str] = 7
while prime_candidate < max_prime:
primes_count += is_prime(_A )
cube_index += 1
prime_candidate += 6 * cube_index
return primes_count
if __name__ == "__main__":
print(F'''{solution() = }''')
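# Explanatory note (added): the loop above enumerates differences of
# consecutive cubes, (n + 1)**3 - n**3 = 3*n**2 + 3*n + 1, i.e. 7, 19, 37, 61,
# 91, ... (91 = 7 * 13 is the first composite), and counts how many below
# `max_prime` are prime. The update `prime_candidate += 6 * cube_index` works
# because consecutive differences grow linearly:
# (3*(n + 1)**2 + 3*(n + 1) + 1) - (3*n**2 + 3*n + 1) == 6*n + 6.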
| 25
| 1
|
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
a_ = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
a_ = 12_8022
a_ = 12_8028
@require_sentencepiece
class _UpperCamelCase ( __A , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =MaMaaaTokenizer
lowerCamelCase__ =False
lowerCamelCase__ =False
lowerCamelCase__ =True
def __UpperCamelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
super().setUp()
SCREAMING_SNAKE_CASE : int = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
SCREAMING_SNAKE_CASE : str = dict(zip(a , range(len(a ) ) ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = Path(self.tmpdirname )
save_json(a , save_dir / VOCAB_FILES_NAMES["vocab_file"] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(a , save_dir / VOCAB_FILES_NAMES["spm_file"] )
SCREAMING_SNAKE_CASE : List[str] = MaMaaaTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def __UpperCamelCase ( self : str , **a : Dict ) -> List[Any]:
"""simple docstring"""
return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **a )
def __UpperCamelCase ( self : List[str] , a : List[str] ) -> Any:
"""simple docstring"""
return (
"This is a test",
"This is a test",
)
def __UpperCamelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = "</s>"
SCREAMING_SNAKE_CASE : List[str] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a ) , a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a ) , a )
def __UpperCamelCase ( self : int ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : List[Any] = list(tokenizer.get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "</s>" )
self.assertEqual(vocab_keys[1] , "<unk>" )
self.assertEqual(vocab_keys[-1] , "<s>" )
self.assertEqual(len(a ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )
@unittest.skip("Skip this test while all models are still to be uploaded." )
def __UpperCamelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
pass
def __UpperCamelCase ( self : int ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : Tuple = tokenizer.tokenize("This is a test" )
self.assertListEqual(a , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(a ) , [2, 3, 4, 5, 6] , )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
self.assertListEqual(a , ["▁This", "▁is", "▁a", "▁t", "est"] )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.convert_tokens_to_string(a )
self.assertEqual(a , "This is a test" )
@slow
def __UpperCamelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = {"input_ids": [[12_8022, 11_0108, 397, 11, 3_8272, 2247, 12_4811, 285, 1_8105, 1586, 207, 7, 3_9534, 4428, 397, 1019, 1_8105, 1586, 207, 7, 4_1337, 1_6786, 241, 7, 2_0214, 17, 12_5690, 1_0398, 7, 4_4378, 5_8069, 6_8342, 7798, 7343, 11, 299, 3_3310, 4, 158, 3_7350, 9_4077, 4569, 299, 3_3310, 90, 4, 5_2840, 290, 4, 3_1270, 112, 299, 682, 4, 5_2840, 3_9953, 1_4079, 193, 5_2519, 9_0894, 1_7894, 12_0697, 11, 4_0445, 551, 17, 1019, 5_2519, 9_0894, 1_7756, 963, 11, 4_0445, 480, 17, 9792, 1120, 5173, 1393, 6240, 1_6786, 241, 12_0996, 28, 1245, 1393, 11_8240, 1_1123, 1019, 9_3612, 2691, 1_0618, 9_8058, 12_0409, 1928, 279, 4, 4_0683, 367, 178, 207, 1019, 103, 10_3121, 506, 6_5296, 5, 2], [12_8022, 2_1217, 367, 117, 12_5450, 128, 719, 7, 7308, 40, 9_3612, 1_2669, 1116, 1_6704, 71, 1_7785, 3699, 1_5592, 35, 144, 9584, 241, 1_1943, 713, 950, 799, 2247, 8_8427, 150, 149, 11_8813, 12_0706, 1019, 10_6906, 8_1518, 28, 1224, 2_2799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [12_8022, 1658, 12_3311, 5155, 5578, 4722, 279, 1_4947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
self.tokenizer_integration_test_util(
expected_encoding=a , model_name="facebook/m2m100_418M" , revision="c168bae485c864188cf9aa0e4108b0b6934dc91e" , )
@require_torch
@require_sentencepiece
@require_tokenizers
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ ='facebook/m2m100_418M'
lowerCamelCase__ =[
'In my opinion, there are two levels of response from the French government.',
'NSA Affair Emphasizes Complete Lack of Debate on Intelligence',
]
lowerCamelCase__ =[
'Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.',
'L\'affaire NSA souligne l\'absence totale de débat sur le renseignement',
]
# fmt: off
lowerCamelCase__ =[EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
@classmethod
def __UpperCamelCase ( cls : Tuple ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : MaMaaaTokenizer = MaMaaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="en" , tgt_lang="fr" )
SCREAMING_SNAKE_CASE : Union[str, Any] = 1
return cls
def __UpperCamelCase ( self : Any ) -> Dict:
"""simple docstring"""
self.assertEqual(self.tokenizer.get_lang_id("ar" ) , 12_8006 )
self.assertEqual(self.tokenizer.get_lang_id("en" ) , 12_8022 )
self.assertEqual(self.tokenizer.get_lang_id("ro" ) , 12_8076 )
self.assertEqual(self.tokenizer.get_lang_id("mr" ) , 12_8063 )
def __UpperCamelCase ( self : Dict ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = self.tokenizer.get_vocab()
self.assertEqual(len(a ) , self.tokenizer.vocab_size )
self.assertEqual(vocab["<unk>"] , 3 )
self.assertIn(self.tokenizer.get_lang_token("en" ) , a )
def __UpperCamelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = "en"
SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , a )
def __UpperCamelCase ( self : str ) -> List[Any]:
"""simple docstring"""
self.assertIn(FR_CODE , self.tokenizer.all_special_ids )
# fmt: off
SCREAMING_SNAKE_CASE : Union[str, Any] = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 1_4028, 136, 3286, 9706, 6, 9_0797, 6, 14_4012, 162, 8_8128, 3_0061, 5, 2]
# fmt: on
SCREAMING_SNAKE_CASE : str = self.tokenizer.decode(a , skip_special_tokens=a )
SCREAMING_SNAKE_CASE : int = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=a )
self.assertEqual(a , a )
self.assertNotIn(self.tokenizer.eos_token , a )
def __UpperCamelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE : Dict = self.tokenizer.lang_token_to_id
self.tokenizer.save_pretrained(a )
SCREAMING_SNAKE_CASE : int = MaMaaaTokenizer.from_pretrained(a )
self.assertDictEqual(new_tok.lang_token_to_id , a )
@require_torch
def __UpperCamelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = "en"
SCREAMING_SNAKE_CASE : List[Any] = "fr"
SCREAMING_SNAKE_CASE : str = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=a , return_tensors="pt" )
SCREAMING_SNAKE_CASE : int = shift_tokens_right(
batch["labels"] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id )
for k in batch:
SCREAMING_SNAKE_CASE : List[str] = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def __UpperCamelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = "mr"
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
SCREAMING_SNAKE_CASE : Any = "zh"
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
@require_torch
def __UpperCamelCase ( self : List[Any] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = "mr"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
SCREAMING_SNAKE_CASE : int = "zh"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
@require_torch
def __UpperCamelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = self.tokenizer._build_translation_inputs("A test" , return_tensors="pt" , src_lang="en" , tgt_lang="ar" )
self.assertEqual(
nested_simplify(a ) , {
# en_XX, A, test, EOS
"input_ids": [[12_8022, 58, 4183, 2]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 12_8006,
} , )
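# Illustrative usage sketch (added; not part of the original test file). It
# assumes the public `M2M100Tokenizer`/`M2M100ForConditionalGeneration` names
# from `transformers` (this stylized dump spells the tokenizer `MaMaaaTokenizer`).
if __name__ == "__main__":
    from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer

    tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en")
    model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
    inputs = tokenizer("Life is like a box of chocolates.", return_tensors="pt")
    # Force the first generated token to be the target language code, mirroring
    # the `forced_bos_token_id` check in the `_build_translation_inputs` test above.
    generated = model.generate(**inputs, forced_bos_token_id=tokenizer.get_lang_id("fr"))
    print(tokenizer.batch_decode(generated, skip_special_tokens=True))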
| 76
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
def lowerCamelCase__ ( _a):
# initialize config
if "resnet-50" in model_name:
SCREAMING_SNAKE_CASE : int = ResNetConfig.from_pretrained("microsoft/resnet-50")
elif "resnet-101" in model_name:
SCREAMING_SNAKE_CASE : int = ResNetConfig.from_pretrained("microsoft/resnet-101")
else:
raise ValueError("Model name should include either resnet50 or resnet101")
SCREAMING_SNAKE_CASE : str = DetrConfig(use_timm_backbone=_a , backbone_config=_a)
# set label attributes
SCREAMING_SNAKE_CASE : List[str] = "panoptic" in model_name
if is_panoptic:
SCREAMING_SNAKE_CASE : Union[str, Any] = 250
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = 91
SCREAMING_SNAKE_CASE : str = "huggingface/label-files"
SCREAMING_SNAKE_CASE : Union[str, Any] = "coco-detection-id2label.json"
SCREAMING_SNAKE_CASE : Optional[Any] = json.load(open(hf_hub_download(_a , _a , repo_type="dataset") , "r"))
SCREAMING_SNAKE_CASE : int = {int(k): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : List[Any] = idalabel
SCREAMING_SNAKE_CASE : List[Any] = {v: k for k, v in idalabel.items()}
return config, is_panoptic
def lowerCamelCase__ ( _a):
# here we list all keys to be renamed (original name on the left, our name on the right)
SCREAMING_SNAKE_CASE : Union[str, Any] = []
# stem
# fmt: off
rename_keys.append(("backbone.0.body.conv1.weight", "backbone.conv_encoder.model.embedder.embedder.convolution.weight"))
rename_keys.append(("backbone.0.body.bn1.weight", "backbone.conv_encoder.model.embedder.embedder.normalization.weight"))
rename_keys.append(("backbone.0.body.bn1.bias", "backbone.conv_encoder.model.embedder.embedder.normalization.bias"))
rename_keys.append(("backbone.0.body.bn1.running_mean", "backbone.conv_encoder.model.embedder.embedder.normalization.running_mean"))
rename_keys.append(("backbone.0.body.bn1.running_var", "backbone.conv_encoder.model.embedder.embedder.normalization.running_var"))
# stages
for stage_idx in range(len(config.backbone_config.depths)):
for layer_idx in range(config.backbone_config.depths[stage_idx]):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight",
))
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight",
))
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias",
))
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean",
))
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var",
))
# 3 convs
for i in range(3):
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight",
))
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight",
))
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias",
))
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean",
))
rename_keys.append(
(
f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var",
f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var",
))
# fmt: on
for i in range(config.encoder_layers):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
f"transformer.encoder.layers.{i}.self_attn.out_proj.weight",
f"encoder.layers.{i}.self_attn.out_proj.weight",
))
rename_keys.append(
(f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"))
rename_keys.append(
(f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight"))
rename_keys.append(
(f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append(
(f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias"))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
f"transformer.decoder.layers.{i}.self_attn.out_proj.weight",
f"decoder.layers.{i}.self_attn.out_proj.weight",
))
rename_keys.append(
(f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias"))
rename_keys.append(
(
f"transformer.decoder.layers.{i}.multihead_attn.out_proj.weight",
f"decoder.layers.{i}.encoder_attn.out_proj.weight",
))
rename_keys.append(
(
f"transformer.decoder.layers.{i}.multihead_attn.out_proj.bias",
f"decoder.layers.{i}.encoder_attn.out_proj.bias",
))
rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"))
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
])
return rename_keys
def lowerCamelCase__ ( _a , _a , _a):
SCREAMING_SNAKE_CASE : str = state_dict.pop(_a)
SCREAMING_SNAKE_CASE : int = val
def lowerCamelCase__ ( _a , _a=False):
SCREAMING_SNAKE_CASE : Optional[Any] = ""
if is_panoptic:
SCREAMING_SNAKE_CASE : Optional[int] = "detr."
# first: transformer encoder
for i in range(6):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
SCREAMING_SNAKE_CASE : List[str] = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
SCREAMING_SNAKE_CASE : Optional[int] = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE : Union[str, Any] = in_proj_weight[:256, :]
SCREAMING_SNAKE_CASE : int = in_proj_bias[:256]
SCREAMING_SNAKE_CASE : Tuple = in_proj_weight[256:512, :]
SCREAMING_SNAKE_CASE : List[Any] = in_proj_bias[256:512]
SCREAMING_SNAKE_CASE : str = in_proj_weight[-256:, :]
SCREAMING_SNAKE_CASE : Optional[Any] = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6):
# read in weights + bias of input projection layer of self-attention
SCREAMING_SNAKE_CASE : List[str] = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
SCREAMING_SNAKE_CASE : str = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE : Union[str, Any] = in_proj_weight[:256, :]
SCREAMING_SNAKE_CASE : Dict = in_proj_bias[:256]
SCREAMING_SNAKE_CASE : List[Any] = in_proj_weight[256:512, :]
SCREAMING_SNAKE_CASE : Any = in_proj_bias[256:512]
SCREAMING_SNAKE_CASE : Optional[int] = in_proj_weight[-256:, :]
SCREAMING_SNAKE_CASE : Union[str, Any] = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
SCREAMING_SNAKE_CASE : Optional[Any] = state_dict.pop(
f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight")
SCREAMING_SNAKE_CASE : int = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
# next, add query, keys and values (in that order) of cross-attention to the state dict
SCREAMING_SNAKE_CASE : Tuple = in_proj_weight_cross_attn[:256, :]
SCREAMING_SNAKE_CASE : Union[str, Any] = in_proj_bias_cross_attn[:256]
SCREAMING_SNAKE_CASE : Optional[Any] = in_proj_weight_cross_attn[256:512, :]
SCREAMING_SNAKE_CASE : Dict = in_proj_bias_cross_attn[256:512]
SCREAMING_SNAKE_CASE : Optional[int] = in_proj_weight_cross_attn[-256:, :]
SCREAMING_SNAKE_CASE : Union[str, Any] = in_proj_bias_cross_attn[-256:]
def lowerCamelCase__ ( ):
SCREAMING_SNAKE_CASE : Tuple = "http://images.cocodataset.org/val2017/000000039769.jpg"
SCREAMING_SNAKE_CASE : Union[str, Any] = Image.open(requests.get(_a , stream=_a).raw)
return im
@torch.no_grad()
def lowerCamelCase__ ( _a , _a=None , _a=False):
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Optional[int] = get_detr_config(_a)
# load original model from torch hub
SCREAMING_SNAKE_CASE : Union[str, Any] = {
"detr-resnet-50": "detr_resnet50",
"detr-resnet-101": "detr_resnet101",
}
logger.info(f"Converting model {model_name}...")
SCREAMING_SNAKE_CASE : Optional[int] = torch.hub.load("facebookresearch/detr" , model_name_to_original_name[model_name] , pretrained=_a).eval()
SCREAMING_SNAKE_CASE : Tuple = detr.state_dict()
# rename keys
for src, dest in create_rename_keys(_a):
if is_panoptic:
SCREAMING_SNAKE_CASE : List[str] = "detr." + src
rename_key(_a , _a , _a)
# query, key and value matrices need special treatment
read_in_q_k_v(_a , is_panoptic=_a)
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
SCREAMING_SNAKE_CASE : List[Any] = "detr.model." if is_panoptic else "model."
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("detr")
and not key.startswith("class_labels_classifier")
and not key.startswith("bbox_predictor")
):
SCREAMING_SNAKE_CASE : Optional[int] = state_dict.pop(_a)
SCREAMING_SNAKE_CASE : Union[str, Any] = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
SCREAMING_SNAKE_CASE : Union[str, Any] = state_dict.pop(_a)
SCREAMING_SNAKE_CASE : Optional[int] = val
elif key.startswith("bbox_attention") or key.startswith("mask_head"):
continue
else:
SCREAMING_SNAKE_CASE : Optional[Any] = state_dict.pop(_a)
SCREAMING_SNAKE_CASE : List[Any] = val
else:
if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
SCREAMING_SNAKE_CASE : Any = state_dict.pop(_a)
SCREAMING_SNAKE_CASE : Any = val
# finally, create HuggingFace model and load state dict
SCREAMING_SNAKE_CASE : int = DetrForSegmentation(_a) if is_panoptic else DetrForObjectDetection(_a)
model.load_state_dict(_a)
model.eval()
# verify our conversion on an image
SCREAMING_SNAKE_CASE : int = "coco_panoptic" if is_panoptic else "coco_detection"
SCREAMING_SNAKE_CASE : Optional[int] = DetrImageProcessor(format=_a)
SCREAMING_SNAKE_CASE : List[str] = processor(images=prepare_img() , return_tensors="pt")
SCREAMING_SNAKE_CASE : Any = encoding["pixel_values"]
SCREAMING_SNAKE_CASE : Optional[Any] = detr(_a)
SCREAMING_SNAKE_CASE : Any = model(_a)
assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1E-3)
assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1E-3)
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1E-4)
print("Looks ok!")
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
Path(_a).mkdir(exist_ok=_a)
model.save_pretrained(_a)
processor.save_pretrained(_a)
if push_to_hub:
# Upload model and image processor to the hub
logger.info("Uploading PyTorch model and image processor to the hub...")
model.push_to_hub(f"nielsr/{model_name}")
processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='detr-resnet-50',
type=str,
choices=['detr-resnet-50', 'detr-resnet-101'],
help='Name of the DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the model to the hub or not.')
a_ = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
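# Example invocation (added for illustration; assumes this file is saved as
# `convert_detr_to_pytorch.py`), using the argparse flags defined above:
#
#     python convert_detr_to_pytorch.py --model_name detr-resnet-50 \
#         --pytorch_dump_folder_path ./detr-resnet-50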
| 76
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_A = {
'''configuration_roc_bert''': ['''ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RoCBertConfig'''],
'''tokenization_roc_bert''': ['''RoCBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
'''ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RoCBertForCausalLM''',
'''RoCBertForMaskedLM''',
'''RoCBertForMultipleChoice''',
'''RoCBertForPreTraining''',
'''RoCBertForQuestionAnswering''',
'''RoCBertForSequenceClassification''',
'''RoCBertForTokenClassification''',
'''RoCBertLayer''',
'''RoCBertModel''',
'''RoCBertPreTrainedModel''',
'''load_tf_weights_in_roc_bert''',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
_A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
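# Explanatory note (added): with `_LazyModule`, nothing from the import
# structure is loaded at module-import time; the first attribute access
# triggers the real submodule import. Illustrative sketch, assuming the
# standard `transformers` package layout:
#
#     import transformers.models.roc_bert as roc_bert
#     config_cls = roc_bert.RoCBertConfig  # imports configuration_roc_bert lazily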
| 261
|
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
_A = logging.get_logger(__name__)
_A = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
_A = {
'''vocab_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
_A = {
'''vocab_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
_A = {
'''vocab_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'''
),
},
}
_A = {
'''facebook/dpr-ctx_encoder-single-nq-base''': 512,
'''facebook/dpr-ctx_encoder-multiset-base''': 512,
}
_A = {
'''facebook/dpr-question_encoder-single-nq-base''': 512,
'''facebook/dpr-question_encoder-multiset-base''': 512,
}
_A = {
'''facebook/dpr-reader-single-nq-base''': 512,
'''facebook/dpr-reader-multiset-base''': 512,
}
_A = {
'''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True},
}
_A = {
'''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True},
}
_A = {
'''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True},
}
class lowercase_ ( __SCREAMING_SNAKE_CASE ):
A__ : List[Any] = VOCAB_FILES_NAMES
A__ : List[Any] = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
A__ : Optional[int] = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ : int = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class lowercase_ ( __SCREAMING_SNAKE_CASE ):
A__ : List[str] = VOCAB_FILES_NAMES
A__ : str = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
A__ : Dict = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ : Dict = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
CUSTOM_DPR_READER_DOCSTRING = r'''
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
```
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
```
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
    padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
        Activates and controls padding. Accepts the following values:
        - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
          is provided).
        - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
          acceptable input length for the model if that argument is not provided.
        - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
          lengths).
    truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
        Activates and controls truncation. Accepts the following values:
        - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
          the maximum acceptable input length for the model if that argument is not provided. This will truncate
          token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
          of pairs) is provided.
        - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
          acceptable input length for the model if that argument is not provided. This will only truncate the first
          sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
        - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
          acceptable input length for the model if that argument is not provided. This will only truncate the
          second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
        - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
          greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
    return_tensors (`str` or [`~utils.TensorType`], *optional*):
        If set, will return tensors instead of list of python integers. Acceptable values are:
        - `'tf'`: Return TensorFlow `tf.constant` objects.
        - `'pt'`: Return PyTorch `torch.Tensor` objects.
        - `'np'`: Return Numpy `np.ndarray` objects.
    return_attention_mask (`bool`, *optional*):
        Whether or not to return the attention mask. If not set, will return the attention mask according to the
        specific tokenizer's default, defined by the `return_outputs` attribute.
        [What are attention masks?](../glossary#attention-mask)
Returns:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
'''
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles=None,
        texts=None,
        padding=False,
        truncation=False,
        max_length=None,
        return_tensors=None,
        return_attention_mask=None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                f"There should be as many titles as texts but got {len(titles)} titles and {len(texts)} texts."
            )
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(
        self,
        reader_input,
        reader_output,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ):
        """Get the span predictions for the extractive Q&A model, ordered by passage relevance."""
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(
        self,
        start_logits,
        end_logits,
        max_answer_length,
        top_spans,
    ):
        """Find the best answer spans, keeping them non-overlapping and no longer than `max_answer_length`."""
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]")
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f"Span is too long: {length} > {max_answer_length}")
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
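# Hedged usage sketch (not part of the original module): assuming the
# "facebook/dpr-reader-single-nq-base" checkpoint listed in the maps above,
# the reader tokenizer batches one question against several (title, text)
# passages, and `decode_best_spans` ranks answer spans across them:
#
#     from transformers import DPRReader, DPRReaderTokenizerFast
#
#     tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
#     reader = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
#     encoded_inputs = tokenizer(
#         questions="What is love?",
#         titles=["Haddaway", "What Is Love"],
#         texts=["Haddaway is a Trinidadian-German singer.", "'What Is Love' is a 1993 Eurodance song."],
#         return_tensors="pt",
#         padding=True,
#     )
#     outputs = reader(**encoded_inputs)
#     best_spans = tokenizer.decode_best_spans(encoded_inputs, outputs)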
| 261
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_xlm_roberta_xl": [
"XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XLMRobertaXLConfig",
"XLMRobertaXLOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta_xl"] = [
"XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMRobertaXLForCausalLM",
"XLMRobertaXLForMaskedLM",
"XLMRobertaXLForMultipleChoice",
"XLMRobertaXLForQuestionAnswering",
"XLMRobertaXLForSequenceClassification",
"XLMRobertaXLForTokenClassification",
"XLMRobertaXLModel",
"XLMRobertaXLPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
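# Minimal usage sketch (editor addition): once the lazy module is installed in
# sys.modules, attribute access triggers the real import, so downstream code
# can simply do the following (requires torch):
#
#     from transformers import XLMRobertaXLConfig, XLMRobertaXLModel
#
#     config = XLMRobertaXLConfig()      # default hyperparameters
#     model = XLMRobertaXLModel(config)  # randomly initialized weights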
| 93
|
"""simple docstring"""
import math
class SelfOrganizingMap:
    """A two-cluster Kohonen self-organizing map."""

    def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
        """Compute the winning (closest) weight vector for a sample."""
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow(sample[i] - weights[0][i], 2)
            d1 += math.pow(sample[i] - weights[1][i], 2)
        # the weight vector with the smaller squared distance wins
        return 0 if d0 < d1 else 1

    def update(
        self, weights: list[list[int | float]], sample: list[int], j: int, alpha: float
    ) -> list[list[int | float]]:
        """Pull the winning weight vector `j` toward the sample by learning rate `alpha`."""
        for i in range(len(sample)):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights
def main() -> None:
    # training samples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5
    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]
            # compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)
            # update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)
    # classify a test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)
    # results
    print(f"Cluster that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")
# running the main() function
if __name__ == "__main__":
main()
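# Worked single-step example: with alpha = 0.5, updating winning vector 0
# toward the sample pulls each component halfway to the sample:
#
#     SelfOrganizingMap().update([[0.2, 0.6], [0.8, 0.4]], [1, 1], 0, 0.5)
#     # weights[0] -> [0.2 + 0.5 * (1 - 0.2), 0.6 + 0.5 * (1 - 0.6)] = [0.6, 0.8]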
| 66
| 0
|
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', F'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', F'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', F'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', F'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.weight''', F'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', F'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', F'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', F'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.weight''', F'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', F'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', F'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', F'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', F'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', F'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.bias''', F'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', F'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', F'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', F'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.bias''', F'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', F'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
("""transformer.decoder.ref_point_head.layers.0.weight""", """decoder.ref_point_head.layers.0.weight"""),
("""transformer.decoder.ref_point_head.layers.0.bias""", """decoder.ref_point_head.layers.0.bias"""),
("""transformer.decoder.ref_point_head.layers.1.weight""", """decoder.ref_point_head.layers.1.weight"""),
("""transformer.decoder.ref_point_head.layers.1.bias""", """decoder.ref_point_head.layers.1.bias"""),
("""transformer.decoder.query_scale.layers.0.weight""", """decoder.query_scale.layers.0.weight"""),
("""transformer.decoder.query_scale.layers.0.bias""", """decoder.query_scale.layers.0.bias"""),
("""transformer.decoder.query_scale.layers.1.weight""", """decoder.query_scale.layers.1.weight"""),
("""transformer.decoder.query_scale.layers.1.bias""", """decoder.query_scale.layers.1.bias"""),
("""transformer.decoder.layers.0.ca_qpos_proj.weight""", """decoder.layers.0.ca_qpos_proj.weight"""),
("""transformer.decoder.layers.0.ca_qpos_proj.bias""", """decoder.layers.0.ca_qpos_proj.bias"""),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original model weights into our Conditional DETR structure.
    """
    # load default config
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)

    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    logger.info(f"Converting model {model_name}...")

    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                # NOTE: the exact target keys of the panoptic branches were lost in this dump;
                # reconstructed by analogy with the DETR conversion script.
                val = state_dict.pop(key)
                state_dict["conditional_detr.model" + key[len("conditional_detr") :]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)

    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""conditional_detr_resnet50""",
type=str,
help="""Name of the CONDITIONAL_DETR model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
    args = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
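# Typical invocation (editor addition; the script file name is assumed from
# the transformers convention for conversion scripts):
#
#     python convert_conditional_detr_original_pytorch_checkpoint_to_pytorch.py \
#         --model_name conditional_detr_resnet50 \
#         --pytorch_dump_folder_path ./conditional_detr_resnet50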
| 364
|
"""simple docstring"""
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
min_primitive_root = 3


def primitive_root(p_val: int) -> int:
    print("Generating primitive root of p")
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g


def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    print("Generating prime p...")
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private key -> has to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)

    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)

    return public_key, private_key


def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as fo:
        fo.write(f"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}")
    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as fo:
        fo.write(f"{private_key[0]},{private_key[1]}")


def main() -> None:
    print("Making key files...")
    make_key_files("elgamal", 2048)
    print("Key files generation successful")
if __name__ == "__main__":
main()
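# Hedged usage sketch: generate_key can be called directly when the keys are
# needed in memory rather than written to files. A small key size is used
# purely for illustration; it is far too small to be secure:
#
#     public_key, private_key = generate_key(64)
#     key_size, e_1, e_2, p = public_key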
| 268
| 0
|
"""simple docstring"""
import re
from filelock import FileLock
try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Re-split a summary so that each sentence ends with a newline (ROUGE-Lsum format)."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
| 98
|
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    """Trainer subclass whose evaluate/predict post-process QA logits into answer strings."""

    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics
    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
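# Hedged usage sketch: the trainer is wired up with a post-processing function
# that turns raw start/end logits into answer strings before metrics are
# computed. All names below are illustrative placeholders:
#
#     trainer = QuestionAnsweringTrainer(
#         model=model,
#         args=training_args,
#         train_dataset=train_features,
#         eval_dataset=eval_features,
#         eval_examples=eval_examples,
#         post_process_function=post_processing_function,
#         compute_metrics=compute_squad_metrics,
#     )
#     metrics = trainer.evaluate(metric_key_prefix="eval")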
| 248
| 0
|
from functools import lru_cache
@lru_cache
def factorial(num: int) -> int:
    """Return num!, computed recursively and memoized via lru_cache."""
    if num < 0:
        raise ValueError("Number should not be negative.")
    return 1 if num in (0, 1) else num * factorial(num - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
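# Examples:
#     factorial(0)  # -> 1
#     factorial(5)  # -> 120 (repeated calls are served from the lru_cache)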
| 366
|
def cocktail_shaker_sort(unsorted: list) -> list:
    """Sort a list in place by alternately bubbling from both ends; example below."""
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False

        # right-to-left pass: pull the smallest remaining element to the front
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True

        # left-to-right pass: push the largest remaining element to the back
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True

        if not swapped:
            break
    return unsorted
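# Example: cocktail_shaker_sort([4, 5, 2, 1, 2]) returns [1, 2, 2, 4, 5];
# an already-sorted input exits after one pass thanks to the swapped flag.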
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase_ = input('Enter numbers separated by a comma:\n').strip()
UpperCAmelCase_ = [int(item) for item in user_input.split(',')]
print(f"""{cocktail_shaker_sort(unsorted) = }""")
| 29
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_mega': ['MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegaConfig', 'MegaOnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mega"] = [
'MEGA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegaForCausalLM',
'MegaForMaskedLM',
'MegaForMultipleChoice',
'MegaForQuestionAnswering',
'MegaForSequenceClassification',
'MegaForTokenClassification',
'MegaModel',
'MegaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 25
|
"""simple docstring"""
INSTALL_CONTENT = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
UpperCAmelCase__ : Any = [{'type': 'code', 'content': INSTALL_CONTENT}]
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
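# Hedged sketch (editor addition): these constants are consumed by a
# doc-to-notebook converter, which conceptually substitutes each placeholder
# into documentation code cells along these lines:
#
#     for placeholder, replacement in black_avoid_patterns.items():
#         cell_source = cell_source.replace(placeholder, replacement)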
| 25
| 1
|
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class SequenceFeatureExtractionTestMixin(FeatureExtractionSavingTestMixin):
    # to overwrite at feature extractor specific tests
    feat_extract_tester = None
    feature_extraction_class = None

    @property
    def feat_extract_dict(self):
        return self.feat_extract_tester.prepare_feat_extract_dict()
    def test_feat_extract_common_properties(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feat_extract, "feature_size"))
        self.assertTrue(hasattr(feat_extract, "sampling_rate"))
        self.assertTrue(hasattr(feat_extract, "padding_value"))
    def test_batch_feature(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        self.assertTrue(all(len(x) == len(y) for x, y in zip(speech_inputs, processed_features[input_name])))

        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="np")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
        )
    @require_torch
    def test_batch_feature_pt(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="pt")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
        )
    @require_tf
    def test_batch_feature_tf(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="tf")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
        )
    def _check_padding(self, numpify=False):
def _inputs_have_equal_length(SCREAMING_SNAKE_CASE_ ):
UpperCAmelCase_: Dict = len(input[0] )
for input_slice in input[1:]:
if len(SCREAMING_SNAKE_CASE_ ) != length:
return False
return True
def _inputs_are_equal(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
if len(SCREAMING_SNAKE_CASE_ ) != len(SCREAMING_SNAKE_CASE_ ):
return False
for input_slice_a, input_slice_a in zip(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
if not np.allclose(np.asarray(SCREAMING_SNAKE_CASE_ ), np.asarray(SCREAMING_SNAKE_CASE_ ), atol=1E-3 ):
return False
return True
UpperCAmelCase_: List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
UpperCAmelCase_: Optional[int] = self.feat_extract_tester.prepare_inputs_for_common(numpify=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Union[str, Any] = feat_extract.model_input_names[0]
UpperCAmelCase_: Any = BatchFeature({input_name: speech_inputs} )
UpperCAmelCase_: int = self.feat_extract_tester.seq_length_diff
UpperCAmelCase_: Tuple = self.feat_extract_tester.max_seq_length + pad_diff
UpperCAmelCase_: Optional[int] = self.feat_extract_tester.min_seq_length
UpperCAmelCase_: List[str] = self.feat_extract_tester.batch_size
UpperCAmelCase_: int = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
UpperCAmelCase_: List[Any] = feat_extract.pad(SCREAMING_SNAKE_CASE_, padding=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Union[str, Any] = input_a[input_name]
UpperCAmelCase_: Optional[Any] = feat_extract.pad(SCREAMING_SNAKE_CASE_, padding="""longest""" )
UpperCAmelCase_: Dict = input_a[input_name]
UpperCAmelCase_: Tuple = feat_extract.pad(SCREAMING_SNAKE_CASE_, padding="""max_length""", max_length=len(speech_inputs[-1] ) )
UpperCAmelCase_: Tuple = input_a[input_name]
UpperCAmelCase_: Any = feat_extract.pad(SCREAMING_SNAKE_CASE_, padding="""longest""", return_tensors="""np""" )
UpperCAmelCase_: Dict = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
feat_extract.pad(SCREAMING_SNAKE_CASE_, padding="""max_length""" )[input_name]
UpperCAmelCase_: int = feat_extract.pad(
SCREAMING_SNAKE_CASE_, padding="""max_length""", max_length=SCREAMING_SNAKE_CASE_, return_tensors="""np""" )
UpperCAmelCase_: Union[str, Any] = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(SCREAMING_SNAKE_CASE_ ) )
self.assertTrue(_inputs_have_equal_length(SCREAMING_SNAKE_CASE_ ) )
self.assertTrue(_inputs_have_equal_length(SCREAMING_SNAKE_CASE_ ) )
self.assertTrue(_inputs_are_equal(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) )
self.assertTrue(len(input_a[0] ) == pad_min_length )
self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff )
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) )
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size )
# test padding for `pad_to_multiple_of` for List[int] + numpy
UpperCAmelCase_: str = feat_extract.pad(SCREAMING_SNAKE_CASE_, pad_to_multiple_of=10 )
UpperCAmelCase_: Dict = input_a[input_name]
UpperCAmelCase_: Any = feat_extract.pad(SCREAMING_SNAKE_CASE_, padding="""longest""", pad_to_multiple_of=10 )
UpperCAmelCase_: str = input_a[input_name]
UpperCAmelCase_: str = feat_extract.pad(
SCREAMING_SNAKE_CASE_, padding="""max_length""", pad_to_multiple_of=10, max_length=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Union[str, Any] = input_a[input_name]
UpperCAmelCase_: List[Any] = feat_extract.pad(
SCREAMING_SNAKE_CASE_, padding="""max_length""", pad_to_multiple_of=10, max_length=SCREAMING_SNAKE_CASE_, return_tensors="""np""", )
UpperCAmelCase_: List[Any] = input_a[input_name]
self.assertTrue(all(len(SCREAMING_SNAKE_CASE_ ) % 10 == 0 for x in input_a ) )
self.assertTrue(_inputs_are_equal(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) )
UpperCAmelCase_: Optional[int] = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(SCREAMING_SNAKE_CASE_ ) == expected_mult_pad_length for x in input_a ) )
self.assertEqual(input_a.shape[:2], (batch_size, expected_mult_pad_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size )
# Check padding value is correct
UpperCAmelCase_: List[Any] = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
< 1E-3 )
self.assertTrue(
abs(
np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
< 1E-3 )
self.assertTrue(
abs(
np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
< 1E-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1E-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
< 1E-3 )
    def _check_truncation(self, numpify=False):
def _inputs_have_equal_length(SCREAMING_SNAKE_CASE_ ):
UpperCAmelCase_: Dict = len(input[0] )
for input_slice in input[1:]:
if len(SCREAMING_SNAKE_CASE_ ) != length:
return False
return True
def _inputs_are_equal(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
if len(SCREAMING_SNAKE_CASE_ ) != len(SCREAMING_SNAKE_CASE_ ):
return False
for input_slice_a, input_slice_a in zip(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
if not np.allclose(np.asarray(SCREAMING_SNAKE_CASE_ ), np.asarray(SCREAMING_SNAKE_CASE_ ), atol=1E-3 ):
return False
return True
UpperCAmelCase_: Dict = self.feature_extraction_class(**self.feat_extract_dict )
UpperCAmelCase_: str = self.feat_extract_tester.prepare_inputs_for_common(numpify=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Optional[int] = feat_extract.model_input_names[0]
UpperCAmelCase_: Optional[int] = BatchFeature({input_name: speech_inputs} )
# truncate to smallest
UpperCAmelCase_: List[Any] = feat_extract.pad(
SCREAMING_SNAKE_CASE_, padding="""max_length""", max_length=len(speech_inputs[0] ), truncation=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Tuple = input_a[input_name]
UpperCAmelCase_: Any = feat_extract.pad(SCREAMING_SNAKE_CASE_, padding="""max_length""", max_length=len(speech_inputs[0] ) )
UpperCAmelCase_: Union[str, Any] = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(SCREAMING_SNAKE_CASE_ ) )
self.assertFalse(_inputs_have_equal_length(SCREAMING_SNAKE_CASE_ ) )
# truncate to smallest with np
UpperCAmelCase_: int = feat_extract.pad(
SCREAMING_SNAKE_CASE_, padding="""max_length""", max_length=len(speech_inputs[0] ), return_tensors="""np""", truncation=SCREAMING_SNAKE_CASE_, )
UpperCAmelCase_: Optional[int] = input_a[input_name]
UpperCAmelCase_: List[Any] = feat_extract.pad(
SCREAMING_SNAKE_CASE_, padding="""max_length""", max_length=len(speech_inputs[0] ), return_tensors="""np""" )
UpperCAmelCase_: List[str] = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(SCREAMING_SNAKE_CASE_ ) )
self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(SCREAMING_SNAKE_CASE_ ) )
# truncate to middle
UpperCAmelCase_: List[str] = feat_extract.pad(
SCREAMING_SNAKE_CASE_, padding="""max_length""", max_length=len(speech_inputs[1] ), truncation=SCREAMING_SNAKE_CASE_, return_tensors="""np""", )
UpperCAmelCase_: Any = input_a[input_name]
UpperCAmelCase_: List[str] = feat_extract.pad(
SCREAMING_SNAKE_CASE_, padding="""max_length""", max_length=len(speech_inputs[1] ), truncation=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Tuple = input_a[input_name]
UpperCAmelCase_: Optional[Any] = feat_extract.pad(
SCREAMING_SNAKE_CASE_, padding="""max_length""", max_length=len(speech_inputs[1] ), return_tensors="""np""" )
UpperCAmelCase_: int = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) )
self.assertTrue(_inputs_have_equal_length(SCREAMING_SNAKE_CASE_ ) )
self.assertTrue(_inputs_have_equal_length(SCREAMING_SNAKE_CASE_ ) )
self.assertTrue(_inputs_are_equal(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(SCREAMING_SNAKE_CASE_ ) )
self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) )
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
feat_extract.pad(SCREAMING_SNAKE_CASE_, truncation=SCREAMING_SNAKE_CASE_ )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
feat_extract.pad(SCREAMING_SNAKE_CASE_, padding="""longest""", truncation=SCREAMING_SNAKE_CASE_ )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
feat_extract.pad(SCREAMING_SNAKE_CASE_, padding="""longest""", truncation=SCREAMING_SNAKE_CASE_ )[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
feat_extract.pad(SCREAMING_SNAKE_CASE_, padding="""max_length""", truncation=SCREAMING_SNAKE_CASE_ )[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
UpperCAmelCase_: List[Any] = 12
UpperCAmelCase_: str = feat_extract.pad(
SCREAMING_SNAKE_CASE_, padding="""max_length""", max_length=len(speech_inputs[0] ), pad_to_multiple_of=SCREAMING_SNAKE_CASE_, truncation=SCREAMING_SNAKE_CASE_, )
UpperCAmelCase_: Any = input_a[input_name]
UpperCAmelCase_: Dict = feat_extract.pad(
SCREAMING_SNAKE_CASE_, padding="""max_length""", max_length=len(speech_inputs[0] ), pad_to_multiple_of=SCREAMING_SNAKE_CASE_, )
UpperCAmelCase_: Optional[int] = input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
UpperCAmelCase_: Optional[Any] = len(speech_inputs[0] )
if expected_length % pad_to_multiple_of != 0:
UpperCAmelCase_: List[Any] = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0] ) == expected_length )
self.assertTrue(_inputs_have_equal_length(SCREAMING_SNAKE_CASE_ ) )
self.assertFalse(_inputs_have_equal_length(SCREAMING_SNAKE_CASE_ ) )
    def test_padding_from_list(self):
        self._check_padding(numpify=False)

    def test_padding_from_array(self):
        self._check_padding(numpify=True)

    def test_truncation_from_list(self):
        self._check_truncation(numpify=False)

    def test_truncation_from_array(self):
        self._check_truncation(numpify=True)
    @require_torch
    def test_padding_accepts_tensors_pt(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)
    @require_tf
    def test_padding_accepts_tensors_tf(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_tf = feat_extract.pad(processed_features, padding="longest", return_tensors="tf")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_tf.numpy().astype(np.float32).sum()) < 1e-2)
    def test_attention_mask(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})

        processed = feat_extract.pad(processed, padding="longest", return_tensors="np")
        self.assertIn("attention_mask", processed)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)
    def test_attention_mask_with_truncation(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})
        max_length = min(input_lengths)

        processed_pad = feat_extract.pad(
            processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np"
        )
        self.assertIn("attention_mask", processed_pad)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length]
        )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs]
        )
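# Hedged usage sketch: the behaviour exercised above reduces to
# SequenceFeatureExtractor.pad; with a concrete extractor such as
# Wav2Vec2FeatureExtractor it looks roughly like:
#
#     from transformers import Wav2Vec2FeatureExtractor
#
#     extractor = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0)
#     batch = extractor.pad(
#         {"input_values": [[0.1, 0.2, 0.3], [0.4, 0.5]]},
#         padding="longest",
#         return_attention_mask=True,
#         return_tensors="np",
#     )
#     # batch["attention_mask"] marks real frames with 1 and padding with 0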
| 82
|
from __future__ import annotations
def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    """Divide-and-conquer maximum of nums[left:right + 1]."""
    if len(nums) == 0:
        raise ValueError("find_max() arg is an empty sequence")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("list index out of range")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range [left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range [mid + 1, right]
    return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
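# Examples:
#     find_max([1, 5, 3], 0, 2)  # -> 5
#     find_max([7], 0, 0)        # -> 7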
| 82
| 1
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class ScoreSdeVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()

        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator).images

        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(
            num_inference_steps=2, output_type="numpy", generator=generator, return_dict=False
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class ScoreSdeVePipelineIntegrationTests(unittest.TestCase):
    def test_inference(self):
        model_id = "google/ncsnpp-church-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)

        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type="numpy", generator=generator).images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
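# Hedged usage sketch: outside the test harness the pipeline is driven the
# same way; with the hub checkpoint this samples a 256x256 church image
# (slow on CPU, requires network access):
#
#     from diffusers import ScoreSdeVePipeline
#
#     sde_ve = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256")
#     image = sde_ve(num_inference_steps=10, output_type="numpy").images[0]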
| 261
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE__:int = logging.get_logger(__name__)
class UperNetConfig(PretrainedConfig):
    model_type = "upernet"

    def __init__( self , backbone_config=None , hidden_size=512 , initializer_range=0.02 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , auxiliary_in_channels=384 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=False , loss_ignore_index=255 , **kwargs , ):
        super().__init__(**kwargs)
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
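# Hedged usage sketch (my addition, not part of the module): constructing the config
# with an explicit backbone and round-tripping it through to_dict(), as serialization
# code would.
if __name__ == "__main__":
    backbone = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
    config = UperNetConfig(backbone_config=backbone, hidden_size=512)
    assert config.to_dict()["backbone_config"]["model_type"] == "resnet"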
| 261
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""MIT/ast-finetuned-audioset-10-10-0.4593""": (
"""https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"""
),
}
class ASTConfig(PretrainedConfig):
    """simple docstring"""
    model_type = "audio-spectrogram-transformer"

    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-12 , patch_size=16 , qkv_bias=True , frequency_stride=10 , time_stride=10 , max_length=1024 , num_mel_bins=128 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
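# Hedged usage sketch (my addition): the defaults of the config defined above, checked
# directly against a fresh instance.
if __name__ == "__main__":
    config = ASTConfig()
    assert (config.hidden_size, config.frequency_stride, config.time_stride) == (768, 10, 10)
    assert config.num_mel_bins == 128 and config.max_length == 1024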
| 25
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/vocab.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/vocab.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/vocab.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/merges.txt""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/merges.txt""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/merges.txt""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"""
),
},
"""tokenizer_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/tokenizer.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/tokenizer.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json""",
"""roberta-base-openai-detector""": (
"""https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"""
),
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "roberta-base": 512,
    "roberta-large": 512,
    "roberta-large-mnli": 512,
    "distilroberta-base": 512,
    "roberta-base-openai-detector": 512,
    "roberta-large-openai-detector": 512,
}
class RobertaTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = RobertaTokenizer

    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , trim_offsets=True , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , errors=errors , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get('add_prefix_space' , add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop('type'))
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
        tokenizer_component = 'post_processor'
        tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
            if "sep" in state:
                state['sep'] = tuple(state['sep'])
            if "cls" in state:
                state['cls'] = tuple(state['cls'])
            changes_to_apply = False
            if state.get('add_prefix_space' , add_prefix_space) != add_prefix_space:
                state['add_prefix_space'] = add_prefix_space
                changes_to_apply = True
            if state.get('trim_offsets' , trim_offsets) != trim_offsets:
                state['trim_offsets'] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors , state.pop('type'))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer , tokenizer_component , new_value)

    @property
    def mask_token(self):
        '''simple docstring'''
        if self._mask_token is None:
            if self.verbose:
                logger.error('Using mask_token, but it is not set yet.')
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self , value):
        '''simple docstring'''
        value = AddedToken(value , lstrip=True , rstrip=False) if isinstance(value , str) else value
        self._mask_token = value

    def _batch_encode_plus(self , *args , **kwargs):
        '''simple docstring'''
        is_split_into_words = kwargs.get('is_split_into_words' , False)
        assert self.add_prefix_space or not is_split_into_words, (
            F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args , **kwargs)

    def _encode_plus(self , *args , **kwargs):
        '''simple docstring'''
        is_split_into_words = kwargs.get('is_split_into_words' , False)
        assert self.add_prefix_space or not is_split_into_words, (
            F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args , **kwargs)

    def save_vocabulary(self , save_directory , filename_prefix = None):
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self , token_ids_0 , token_ids_1=None):
        '''simple docstring'''
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self , token_ids_0 , token_ids_1 = None):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
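# Hedged usage sketch (my addition): the special-token layout produced by the two
# methods above; loading "roberta-base" requires network access.
if __name__ == "__main__":
    tok = RobertaTokenizerFast.from_pretrained("roberta-base")
    pair = tok.build_inputs_with_special_tokens([7, 8], [9, 10])
    # single sequences are wrapped as <s> ... </s>; pairs as <s> A </s></s> B </s>
    assert pair == [tok.bos_token_id, 7, 8, tok.eos_token_id, tok.eos_token_id, 9, 10, tok.eos_token_id]
    # RoBERTa does not use token types, so the ids are all zero
    assert tok.create_token_type_ids_from_sequences([7, 8], [9, 10]) == [0] * len(pair)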
| 25
| 1
|
def solution(n: int = 1000) -> int:
    '''simple docstring'''
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product
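# Worked example (my addition): for n = 12 the only triplet is (3, 4, 5), since
# 3 + 4 + 5 = 12 and 3**2 + 4**2 == 5**2, so solution(12) returns 3 * 4 * 5 = 60.
assert solution(12) == 60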
if __name__ == "__main__":
print(f"""{solution() = }""")
| 7
|
"""simple docstring"""
import copy
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch:
    def __init__(self) -> None:
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image) -> None:
        self.img = cv2.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            self.rem = last % 1  # fractional part decides the round-half-up below
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
        self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
        self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self) -> None:
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self) -> None:
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()
if __name__ == "__main__":
file_path = os.path.join(os.path.dirname(__file__), '''image_data/input.jpg''')
stretcher = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 268
| 0
|
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)
class VisionEncoderDecoderConfig(PretrainedConfig):
    model_type = "vision-encoder-decoder"
    is_composition = True

    def __init__(self , **kwargs):
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuration of type {self.model_type} cannot be instantiated because "
                f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}")
        encoder_config = kwargs.pop('''encoder''')
        encoder_model_type = encoder_config.pop('''model_type''')
        decoder_config = kwargs.pop('''decoder''')
        decoder_model_type = decoder_config.pop('''model_type''')
        self.encoder = AutoConfig.for_model(encoder_model_type , **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type , **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(cls , encoder_config: PretrainedConfig , decoder_config: PretrainedConfig , **kwargs) -> PretrainedConfig:
        logger.info('''Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''')
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output['''encoder'''] = self.encoder.to_dict()
        output['''decoder'''] = self.decoder.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output


class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
            ])

    @property
    def atol_for_validation(self) -> float:
        return 1E-4

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict({'''last_hidden_state''': {0: '''batch''', 1: '''encoder_sequence'''}})


class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict()
        common_inputs['''input_ids'''] = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
        common_inputs['''attention_mask'''] = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
        common_inputs['''encoder_hidden_states'''] = {0: '''batch''', 1: '''encoder_sequence'''}
        return common_inputs

    def generate_dummy_inputs(self , tokenizer: "PreTrainedTokenizerBase" , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional["TensorType"] = None , ) -> Mapping[str, Any]:
        import torch
        common_inputs = OrderedDict()
        dummy_input = super().generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework)
        batch, encoder_sequence = dummy_input['''input_ids'''].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs['''input_ids'''] = dummy_input.pop('''input_ids''')
        common_inputs['''attention_mask'''] = dummy_input.pop('''attention_mask''')
        common_inputs['''encoder_hidden_states'''] = torch.zeros(encoder_hidden_states_shape)
        return common_inputs


class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> None:
        pass

    def get_encoder_config(self , encoder_config: PretrainedConfig) -> OnnxConfig:
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(self , encoder_config: PretrainedConfig , decoder_config: PretrainedConfig , feature: str = "default") -> OnnxConfig:
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config , feature)
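# Hedged usage sketch (my addition, assuming the public `transformers` package for the
# sub-configs): the composite config is normally built from two existing configs.
if __name__ == "__main__":
    from transformers import BertConfig, ViTConfig
    config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(ViTConfig(), BertConfig())
    assert config.decoder.is_decoder and config.decoder.add_cross_attention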
| 344
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}
class OpenLlamaConfig(PretrainedConfig):
    model_type = "open-llama"

    def __init__( self , vocab_size=100000 , hidden_size=4096 , intermediate_size=11008 , num_hidden_layers=32 , num_attention_heads=32 , hidden_act="silu" , max_position_embeddings=2048 , initializer_range=0.02 , rms_norm_eps=1E-6 , use_cache=True , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , tie_word_embeddings=False , use_memory_efficient_attention=True , hidden_dropout_prob=0.1 , attention_dropout_prob=0.1 , use_stable_embedding=True , shared_input_output_embedding=True , rope_scaling=None , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # keep accepting the historically misspelled kwarg for backward compatibility
        self.use_memory_efficient_attention = kwargs.pop(
            '''use_memorry_efficient_attention''' , use_memory_efficient_attention)
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs , )

    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling , dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                '''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '''
                f"got {self.rope_scaling}")
        rope_scaling_type = self.rope_scaling.get('''type''' , None)
        rope_scaling_factor = self.rope_scaling.get('''factor''' , None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
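# Hedged usage sketch (my addition): the contract enforced by the validation above — a
# well-formed value is a dict with a `type` of "linear" or "dynamic" and a float
# `factor` strictly greater than 1.
if __name__ == "__main__":
    OpenLlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})  # passes validation
    try:
        OpenLlamaConfig(rope_scaling={"type": "linear", "factor": 0.5})
    except ValueError as err:
        print(err)  # factor field must be a float > 1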
| 344
| 1
|
def remove_duplicates(key: str) -> str:
    """simple docstring"""
    key_no_dups = ""
    for ch in key:
        if ch == " " or (ch not in key_no_dups and ch.isalpha()):
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict:
    """simple docstring"""
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(key), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict) -> str:
    """simple docstring"""
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict) -> str:
    """simple docstring"""
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


def main() -> None:
    """simple docstring"""
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
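# Hedged round-trip example (my addition): decipher() inverts encipher() for a keyword
# whose keyed alphabet is injective, e.g. "Cipher"; non-letters pass through unchanged.
_demo_map = create_cipher_map("Cipher")
assert decipher(encipher("Hello World!!", _demo_map), _demo_map) == "HELLO WORLD!!"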
| 82
|
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope='session' )
def lowercase__ ( ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = 10
UpperCAmelCase_ : Tuple = datasets.Features(
{
'tokens': datasets.Sequence(datasets.Value('string' ) ),
'labels': datasets.Sequence(datasets.ClassLabel(names=['negative', 'positive'] ) ),
'answers': datasets.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
'id': datasets.Value('int64' ),
} )
UpperCAmelCase_ : Tuple = datasets.Dataset.from_dict(
{
'tokens': [['foo'] * 5] * n,
'labels': [[1] * 5] * n,
'answers': [{'answer_start': [97], 'text': ['1976']}] * 10,
'id': list(range(__snake_case ) ),
} , features=__snake_case , )
return dataset
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] , __snake_case : List[str] ):
'''simple docstring'''
UpperCAmelCase_ : str = str(tmp_path_factory.mktemp('data' ) / 'file.arrow' )
dataset.map(cache_file_name=__snake_case )
return filename
# FILE_CONTENT + files
__UpperCAmelCase = '\\n Text data.\n Second line of data.'
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = tmp_path_factory.mktemp('data' ) / 'file.txt'
UpperCAmelCase_ : Tuple = FILE_CONTENT
with open(__snake_case , 'w' ) as f:
f.write(__snake_case )
return filename
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[str] ):
'''simple docstring'''
import bz2
UpperCAmelCase_ : Optional[int] = tmp_path_factory.mktemp('data' ) / 'file.txt.bz2'
UpperCAmelCase_ : str = bytes(__snake_case , 'utf-8' )
with bz2.open(__snake_case , 'wb' ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Any ):
'''simple docstring'''
import gzip
UpperCAmelCase_ : Optional[Any] = str(tmp_path_factory.mktemp('data' ) / 'file.txt.gz' )
UpperCAmelCase_ : Dict = bytes(__snake_case , 'utf-8' )
with gzip.open(__snake_case , 'wb' ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Dict ):
'''simple docstring'''
if datasets.config.LZ4_AVAILABLE:
import lz4.frame
UpperCAmelCase_ : Any = tmp_path_factory.mktemp('data' ) / 'file.txt.lz4'
UpperCAmelCase_ : Any = bytes(__snake_case , 'utf-8' )
with lz4.frame.open(__snake_case , 'wb' ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Tuple , __snake_case : List[Any] ):
'''simple docstring'''
if datasets.config.PY7ZR_AVAILABLE:
import py7zr
UpperCAmelCase_ : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'file.txt.7z'
with py7zr.SevenZipFile(__snake_case , 'w' ) as archive:
archive.write(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[str] , __snake_case : List[Any] ):
'''simple docstring'''
import tarfile
UpperCAmelCase_ : Any = tmp_path_factory.mktemp('data' ) / 'file.txt.tar'
with tarfile.TarFile(__snake_case , 'w' ) as f:
f.add(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
import lzma
UpperCAmelCase_ : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'file.txt.xz'
UpperCAmelCase_ : Any = bytes(__snake_case , 'utf-8' )
with lzma.open(__snake_case , 'wb' ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[int] , __snake_case : Optional[Any] ):
'''simple docstring'''
import zipfile
UpperCAmelCase_ : int = tmp_path_factory.mktemp('data' ) / 'file.txt.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] ):
'''simple docstring'''
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
UpperCAmelCase_ : Tuple = tmp_path_factory.mktemp('data' ) / 'file.txt.zst'
UpperCAmelCase_ : List[str] = bytes(__snake_case , 'utf-8' )
with zstd.open(__snake_case , 'wb' ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Any ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'file.xml'
UpperCAmelCase_ : List[Any] = textwrap.dedent(
'\\n <?xml version="1.0" encoding="UTF-8" ?>\n <tmx version="1.4">\n <header segtype="sentence" srclang="ca" />\n <body>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang="en"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang="en"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang="en"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang="en"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang="en"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>' )
with open(__snake_case , 'w' ) as f:
f.write(__snake_case )
return filename
__UpperCAmelCase = [
{'col_1': '0', 'col_2': 0, 'col_3': 0.0},
{'col_1': '1', 'col_2': 1, 'col_3': 1.0},
{'col_1': '2', 'col_2': 2, 'col_3': 2.0},
{'col_1': '3', 'col_2': 3, 'col_3': 3.0},
]
__UpperCAmelCase = [
{'col_1': '4', 'col_2': 4, 'col_3': 4.0},
{'col_1': '5', 'col_2': 5, 'col_3': 5.0},
]
__UpperCAmelCase = {
'col_1': ['0', '1', '2', '3'],
'col_2': [0, 1, 2, 3],
'col_3': [0.0, 1.0, 2.0, 3.0],
}
__UpperCAmelCase = [
{'col_3': 0.0, 'col_1': '0', 'col_2': 0},
{'col_3': 1.0, 'col_1': '1', 'col_2': 1},
]
__UpperCAmelCase = [
{'col_1': 's0', 'col_2': 0, 'col_3': 0.0},
{'col_1': 's1', 'col_2': 1, 'col_3': 1.0},
{'col_1': 's2', 'col_2': 2, 'col_3': 2.0},
{'col_1': 's3', 'col_2': 3, 'col_3': 3.0},
]
@pytest.fixture(scope='session' )
def lowercase__ ( ):
'''simple docstring'''
return DATA_DICT_OF_LISTS
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : int ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = datasets.Dataset.from_dict(__snake_case )
UpperCAmelCase_ : Optional[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.arrow' )
dataset.map(cache_file_name=__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'dataset.sqlite' )
with contextlib.closing(sqlite3.connect(__snake_case ) ) as con:
UpperCAmelCase_ : List[Any] = con.cursor()
cur.execute('CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)' )
for item in DATA:
cur.execute('INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)' , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.csv' )
with open(__snake_case , 'w' , newline='' ) as f:
UpperCAmelCase_ : Tuple = csv.DictWriter(__snake_case , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset2.csv' )
with open(__snake_case , 'w' , newline='' ) as f:
UpperCAmelCase_ : Optional[Any] = csv.DictWriter(__snake_case , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str , __snake_case : Any ):
'''simple docstring'''
import bz2
UpperCAmelCase_ : int = tmp_path_factory.mktemp('data' ) / 'dataset.csv.bz2'
with open(__snake_case , 'rb' ) as f:
UpperCAmelCase_ : int = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bz2.open(__snake_case , 'wb' ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[str] , __snake_case : Tuple , __snake_case : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str , __snake_case : Optional[int] , __snake_case : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename(csv_path.replace('.csv' , '.CSV' ) ) )
f.write(__snake_case , arcname=os.path.basename(csva_path.replace('.csv' , '.CSV' ) ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Tuple , __snake_case : int , __snake_case : str ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.csv.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : int = str(tmp_path_factory.mktemp('data' ) / 'dataset.parquet' )
UpperCAmelCase_ : Dict = pa.schema(
{
'col_1': pa.string(),
'col_2': pa.intaa(),
'col_3': pa.floataa(),
} )
with open(__snake_case , 'wb' ) as f:
UpperCAmelCase_ : List[Any] = pq.ParquetWriter(__snake_case , schema=__snake_case )
UpperCAmelCase_ : Any = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(__snake_case ) )] for k in DATA[0]} , schema=__snake_case )
writer.write_table(__snake_case )
writer.close()
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
UpperCAmelCase_ : Optional[int] = {'data': DATA}
with open(__snake_case , 'w' ) as f:
json.dump(__snake_case , __snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
UpperCAmelCase_ : Tuple = {'data': DATA_DICT_OF_LISTS}
with open(__snake_case , 'w' ) as f:
json.dump(__snake_case , __snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl' )
with open(__snake_case , 'w' ) as f:
for item in DATA:
f.write(json.dumps(__snake_case ) + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset2.jsonl' )
with open(__snake_case , 'w' ) as f:
for item in DATA:
f.write(json.dumps(__snake_case ) + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : int ):
'''simple docstring'''
UpperCAmelCase_ : int = str(tmp_path_factory.mktemp('data' ) / 'dataset_312.jsonl' )
with open(__snake_case , 'w' ) as f:
for item in DATA_312:
f.write(json.dumps(__snake_case ) + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'dataset-str.jsonl' )
with open(__snake_case , 'w' ) as f:
for item in DATA_STR:
f.write(json.dumps(__snake_case ) + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Dict , __snake_case : Dict ):
'''simple docstring'''
import gzip
UpperCAmelCase_ : Union[str, Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt.gz' )
with open(__snake_case , 'rb' ) as orig_file:
with gzip.open(__snake_case , 'wb' ) as zipped_file:
zipped_file.writelines(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : int , __snake_case : Any ):
'''simple docstring'''
import gzip
UpperCAmelCase_ : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.gz' )
with open(__snake_case , 'rb' ) as orig_file:
with gzip.open(__snake_case , 'wb' ) as zipped_file:
zipped_file.writelines(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] , __snake_case : Dict , __snake_case : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : int = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] , __snake_case : str , __snake_case : Dict , __snake_case : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : str = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.join('nested' , os.path.basename(__snake_case ) ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.jsonl.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Tuple , __snake_case : str , __snake_case : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.tar'
with tarfile.TarFile(__snake_case , 'w' ) as f:
f.add(__snake_case , arcname=os.path.basename(__snake_case ) )
f.add(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str , __snake_case : Any , __snake_case : Any , __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.tar'
with tarfile.TarFile(__snake_case , 'w' ) as f:
f.add(__snake_case , arcname=os.path.join('nested' , os.path.basename(__snake_case ) ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Any = ['0', '1', '2', '3']
UpperCAmelCase_ : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt' )
with open(__snake_case , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = ['0', '1', '2', '3']
UpperCAmelCase_ : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'dataset2.txt' )
with open(__snake_case , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : Dict = ['0', '1', '2', '3']
UpperCAmelCase_ : List[str] = tmp_path_factory.mktemp('data' ) / 'dataset.abc'
with open(__snake_case , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Any , __snake_case : Union[str, Any] , __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.text.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Dict , __snake_case : str , __snake_case : Any ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.text.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Union[str, Any] , __snake_case : str , __snake_case : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'dataset.ext.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename('unsupported.ext' ) )
f.write(__snake_case , arcname=os.path.basename('unsupported_2.ext' ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Dict ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = '\n'.join(['First', 'Second\u2029with Unicode new line', 'Third'] )
UpperCAmelCase_ : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset_with_unicode_new_lines.txt' )
with open(__snake_case , 'w' , encoding='utf-8' ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( ):
'''simple docstring'''
return os.path.join('tests' , 'features' , 'data' , 'test_image_rgb.jpg' )
@pytest.fixture(scope='session' )
def lowercase__ ( ):
'''simple docstring'''
return os.path.join('tests' , 'features' , 'data' , 'test_audio_44100.wav' )
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str , __snake_case : List[str] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.img.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
f.write(__snake_case , arcname=os.path.basename(__snake_case ).replace('.jpg' , '2.jpg' ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Any ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = tmp_path_factory.mktemp('data_dir' )
(data_dir / "subdir").mkdir()
with open(data_dir / 'subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / 'subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
# hidden file
with open(data_dir / 'subdir' / '.test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / '.subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / '.subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
return data_dir
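# Hedged usage sketch (my addition): pytest resolves fixtures by parameter name. The
# fixture names in this file were obfuscated, so this self-contained pair defines a
# tiny CSV fixture of the same shape plus a consuming test; in practice the test would
# live in a test module, since pytest does not collect tests from conftest.py.
@pytest.fixture(scope='session' )
def demo_csv_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp('demo') / 'demo.csv')
    with open(path, 'w', newline='') as f:
        writer = csv.DictWriter(f, fieldnames=['col_1', 'col_2', 'col_3'])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path


def test_demo_csv_has_header_and_rows(demo_csv_path):
    with open(demo_csv_path, newline='') as f:
        rows = f.readlines()
    assert len(rows) == 1 + len(DATA)  # header + data rows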
| 29
| 0
|
"""simple docstring"""
import os
import jsonlines
import numpy as np
from tqdm import tqdm
DOC_STRIDE = 2048
MAX_LENGTH = 4096
SEED = 42
PROCESS_TRAIN = os.environ.pop("PROCESS_TRAIN", "false")
CATEGORY_MAPPING = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4}
def _get_single_answer(example):
    """simple docstring"""
    def choose_first(answer, is_long_answer=False):
        assert isinstance(answer, list)
        if len(answer) == 1:
            answer = answer[0]
            return {k: [answer[k]] for k in answer} if is_long_answer else answer
        for a in answer:
            if is_long_answer:
                a = {k: [a[k]] for k in a}
            if len(a["start_token"]) > 0:
                break
        return a

    answer = {"id": example["id"]}
    annotation = example["annotations"]
    yes_no_answer = annotation["yes_no_answer"]
    if 0 in yes_no_answer or 1 in yes_no_answer:
        answer["category"] = ["yes"] if 1 in yes_no_answer else ["no"]
        answer["start_token"] = answer["end_token"] = []
        answer["start_byte"] = answer["end_byte"] = []
        answer["text"] = ["<cls>"]
    else:
        answer["category"] = ["short"]
        out = choose_first(annotation["short_answers"])
        if len(out["start_token"]) == 0:
            # answer will be long if short is not available
            answer["category"] = ["long"]
            out = choose_first(annotation["long_answer"], is_long_answer=True)
            out["text"] = []
        answer.update(out)
    # disregard some samples
    if len(answer["start_token"]) > 1 or answer["start_token"] == answer["end_token"]:
        answer["remove_it"] = True
    else:
        answer["remove_it"] = False
    cols = ["start_token", "end_token", "start_byte", "end_byte", "text"]
    if not all(isinstance(answer[k], list) for k in cols):
        raise ValueError("Issue in ID", example["id"])
    return answer
def get_context_and_ans(example, assertion=False):
    """simple docstring"""
    answer = _get_single_answer(example)
    # bytes are of no use
    del answer["start_byte"]
    del answer["end_byte"]
    # handle yes_no answers explicitly
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        doc = example["document"]["tokens"]
        context = []
        for i in range(len(doc["token"])):
            if not doc["is_html"][i]:
                context.append(doc["token"][i])
        return {
            "context": " ".join(context),
            "answer": {
                "start_token": -100,  # ignore index in cross-entropy
                "end_token": -100,  # ignore index in cross-entropy
                "category": answer["category"],
                "span": answer["category"],  # extra
            },
        }
    # later, help in removing all no answers
    if answer["start_token"] == [-1]:
        return {
            "context": "None",
            "answer": {
                "start_token": -1,
                "end_token": -1,
                "category": "null",
                "span": "None",  # extra
            },
        }
    # handling normal samples
    cols = ["start_token", "end_token"]
    answer.update({k: answer[k][0] if len(answer[k]) > 0 else answer[k] for k in cols})  # e.g. [10] == 10
    doc = example["document"]["tokens"]
    start_token = answer["start_token"]
    end_token = answer["end_token"]
    context = []
    for i in range(len(doc["token"])):
        if not doc["is_html"][i]:
            context.append(doc["token"][i])
        else:
            if answer["start_token"] > i:
                start_token -= 1
            if answer["end_token"] > i:
                end_token -= 1
    new = " ".join(context[start_token:end_token])
    # checking above code
    if assertion:
        is_html = doc["is_html"][answer["start_token"] : answer["end_token"]]
        old = doc["token"][answer["start_token"] : answer["end_token"]]
        old = " ".join([old[i] for i in range(len(old)) if not is_html[i]])
        if new != old:
            print("ID:", example["id"])
            print("New:", new, end="\n")
            print("Old:", old, end="\n\n")
    return {
        "context": " ".join(context),
        "answer": {
            "start_token": start_token,
            "end_token": end_token - 1,  # this makes it inclusive
            "category": answer["category"],  # either long or short
            "span": new,  # extra
        },
    }
def get_strided_contexts_and_ans(example, tokenizer, doc_stride=2_048, max_length=4_096, assertion=True):
    """simple docstring"""
    out = get_context_and_ans(example, assertion=assertion)
    answer = out["answer"]
    # later, removing these samples
    if answer["start_token"] == -1:
        return {
            "example_id": example["id"],
            "input_ids": [[-1]],
            "labels": {
                "start_token": [-1],
                "end_token": [-1],
                "category": ["null"],
            },
        }
    input_ids = tokenizer(example["question"]["text"], out["context"]).input_ids
    q_len = input_ids.index(tokenizer.sep_token_id) + 1
    # return yes/no
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        inputs = []
        category = []
        q_indices = input_ids[:q_len]
        doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)
        for i in doc_start_indices:
            end_index = i + max_length - q_len
            slice = input_ids[i:end_index]
            inputs.append(q_indices + slice)
            category.append(answer["category"][0])
            if slice[-1] == tokenizer.sep_token_id:
                break
        return {
            "example_id": example["id"],
            "input_ids": inputs,
            "labels": {
                "start_token": [-100] * len(category),
                "end_token": [-100] * len(category),
                "category": category,
            },
        }
    splitted_context = out["context"].split()
    complete_end_token = splitted_context[answer["end_token"]]
    answer["start_token"] = len(
        tokenizer(
            " ".join(splitted_context[: answer["start_token"]]), add_special_tokens=False, ).input_ids)
    answer["end_token"] = len(
        tokenizer(" ".join(splitted_context[: answer["end_token"]]), add_special_tokens=False).input_ids)
    answer["start_token"] += q_len
    answer["end_token"] += q_len
    # fixing end token
    num_sub_tokens = len(tokenizer(complete_end_token, add_special_tokens=False).input_ids)
    if num_sub_tokens > 1:
        answer["end_token"] += num_sub_tokens - 1
    old = input_ids[answer["start_token"] : answer["end_token"] + 1]  # right & left are inclusive
    start_token = answer["start_token"]
    end_token = answer["end_token"]
    if assertion:
        new = tokenizer.decode(old)
        if answer["span"] != new:
            print("ISSUE IN TOKENIZATION")
            print("OLD:", answer["span"])
            print("NEW:", new, end="\n\n")
    if len(input_ids) <= max_length:
        return {
            "example_id": example["id"],
            "input_ids": [input_ids],
            "labels": {
                "start_token": [answer["start_token"]],
                "end_token": [answer["end_token"]],
                "category": answer["category"],
            },
        }
    q_indices = input_ids[:q_len]
    doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)
    inputs = []
    answers_start_token = []
    answers_end_token = []
    answers_category = []  # null, yes, no, long, short
    for i in doc_start_indices:
        end_index = i + max_length - q_len
        slice = input_ids[i:end_index]
        inputs.append(q_indices + slice)
        assert len(inputs[-1]) <= max_length, "Issue in truncating length"
        if start_token >= i and end_token <= end_index - 1:
            start_token = start_token - i + q_len
            end_token = end_token - i + q_len
            answers_category.append(answer["category"][0])  # ["short"] -> "short"
        else:
            start_token = -100
            end_token = -100
            answers_category.append("null")
        new = inputs[-1][start_token : end_token + 1]
        answers_start_token.append(start_token)
        answers_end_token.append(end_token)
        if assertion:
            if new != old and new != [tokenizer.cls_token_id]:
                print("ISSUE in strided for ID:", example["id"])
                print("New:", tokenizer.decode(new))
                print("Old:", tokenizer.decode(old), end="\n\n")
        if slice[-1] == tokenizer.sep_token_id:
            break
    return {
        "example_id": example["id"],
        "input_ids": inputs,
        "labels": {
            "start_token": answers_start_token,
            "end_token": answers_end_token,
            "category": answers_category,
        },
    }
def prepare_inputs(example, tokenizer, doc_stride=2_048, max_length=4_096, assertion=False):
    """simple docstring"""
    example = get_strided_contexts_and_ans(
        example, tokenizer, doc_stride=doc_stride, max_length=max_length, assertion=assertion, )
    return example
def save_to_disk(hf_data, file_name):
    """simple docstring"""
    with jsonlines.open(file_name, "a") as writer:
        for example in tqdm(hf_data, total=len(hf_data), desc="Saving samples ... "):
            labels = example["labels"]
            for ids, start, end, cat in zip(
                example["input_ids"], labels["start_token"], labels["end_token"], labels["category"], ):
                if start == -1 and end == -1:
                    continue  # skip samples that have no answer
                if cat == "null" and np.random.rand() < 0.6:
                    continue  # randomly drop ~60% of "null"-category samples
                writer.write(
                    {
                        "input_ids": ids,
                        "start_token": start,
                        "end_token": end,
                        "category": CATEGORY_MAPPING[cat],
                    })
if __name__ == "__main__":
from datasets import load_dataset
from transformers import BigBirdTokenizer
data = load_dataset("natural_questions")
tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
data = data["train" if PROCESS_TRAIN == "true" else "validation"]
fn_kwargs = {
    "tokenizer": tokenizer,
    "doc_stride": DOC_STRIDE,
    "max_length": MAX_LENGTH,
    "assertion": False,
}
data = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
data = data.remove_columns(["annotations", "document", "id", "question"])
print(data)
np.random.seed(SEED)
cache_file_name = "nq-training.jsonl" if PROCESS_TRAIN == "true" else "nq-validation.jsonl"
save_to_disk(data, file_name=cache_file_name)
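# Worked example (my addition) of the striding arithmetic above: with max_length=4096,
# doc_stride=2048 and, say, a 100-token question, windows start every
# 4096 - 2048 = 2048 tokens and each holds 4096 - 100 = 3996 document tokens, so
# consecutive windows overlap heavily and every answer span falls fully inside at
# least one window.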
| 150
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_xlm_roberta_xl""": [
"""XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XLMRobertaXLConfig""",
"""XLMRobertaXLOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_xlm_roberta_xl"] = [
"""XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMRobertaXLForCausalLM""",
"""XLMRobertaXLForMaskedLM""",
"""XLMRobertaXLForMultipleChoice""",
"""XLMRobertaXLForQuestionAnswering""",
"""XLMRobertaXLForSequenceClassification""",
"""XLMRobertaXLForTokenClassification""",
"""XLMRobertaXLModel""",
"""XLMRobertaXLPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
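# Hedged note (my addition): with the _LazyModule pattern above, importing this package
# only registers the names listed in _import_structure; the torch-backed classes are
# materialized on first attribute access, e.g.
#     mod = importlib.import_module("transformers.models.xlm_roberta_xl")
#     cfg = mod.XLMRobertaXLConfig  # the real submodule import happens here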
| 150
| 1
|
def is_palindrome(n) -> bool:
    """simple docstring"""
    return str(n) == str(n)[::-1]


def sum_reverse(n) -> int:
    """simple docstring"""
    return int(n) + int(str(n)[::-1])


def solution(limit: int = 10_000) -> int:
    """simple docstring"""
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        n = num
        while iterations < 50:
            n = sum_reverse(n)
            iterations += 1
            if is_palindrome(n):
                break
        else:
            lychrel_nums.append(num)
    return len(lychrel_nums)
if __name__ == "__main__":
print(f"{solution() = }")
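# Worked example (my addition): 47 + 74 = 121 is a palindrome after one iteration, so 47
# is not a Lychrel number; 196 produces no palindrome within the 50-iteration cap above
# and is counted as a Lychrel candidate.
assert is_palindrome(sum_reverse(47)) and not is_palindrome(196)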
| 82
|
from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list:
    """simple docstring"""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 80_08_00, degree: int = 80_08_00) -> int:
    """simple docstring"""
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)
    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1
    return hybrid_integers_count
if __name__ == "__main__":
print(f"{solution() = }")
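# Worked example (my addition): a hybrid-integer C(p, q) = p**q * q**p satisfies
# log2(C) = q*log2(p) + p*log2(q), which is exactly the quantity the two-pointer sweep
# above compares against the bound; e.g. C(2, 3) = 2**3 * 3**2 = 72.
assert abs(3 * log2(2) + 2 * log2(3) - log2(72)) < 1e-9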
| 82
| 1
|
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''',
'''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''',
'''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''',
'''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''',
'''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''',
'''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''',
'''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''',
'''self_attn.rotary_emb''': '''encoder.embed_positions''',
'''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''',
'''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''',
'''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''',
'''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''',
'''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''',
'''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''',
'''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''',
'''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''',
'''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''',
'''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''',
'''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''',
'''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
TOP_LEVEL_KEYS = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Dict:
for attribute in key.split('.' ):
snake_case_ = getattr(UpperCAmelCase , UpperCAmelCase )
if weight_type is not None:
snake_case_ = getattr(UpperCAmelCase , UpperCAmelCase ).shape
else:
snake_case_ = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}' )
if weight_type == "weight":
snake_case_ = value
elif weight_type == "weight_g":
snake_case_ = value
elif weight_type == "weight_v":
snake_case_ = value
elif weight_type == "bias":
snake_case_ = value
elif weight_type == "running_mean":
snake_case_ = value
elif weight_type == "running_var":
snake_case_ = value
elif weight_type == "num_batches_tracked":
snake_case_ = value
elif weight_type == "inv_freq":
snake_case_ = value
else:
snake_case_ = value
logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Dict:
snake_case_ = []
snake_case_ = fairseq_model.state_dict()
snake_case_ = hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
snake_case_ = False
if "conv_layers" in name:
load_conv_layer(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , hf_model.config.feat_extract_norm == 'group' , )
snake_case_ = True
else:
for key, mapped_key in MAPPING.items():
snake_case_ = 'wav2vec2_conformer.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
snake_case_ = True
if "*" in mapped_key:
snake_case_ = name.split(UpperCAmelCase )[0].split('.' )[-2]
snake_case_ = mapped_key.replace('*' , UpperCAmelCase )
if "pos_bias_u" in name:
snake_case_ = None
elif "pos_bias_v" in name:
snake_case_ = None
elif "weight_g" in name:
snake_case_ = 'weight_g'
elif "weight_v" in name:
snake_case_ = 'weight_v'
elif "bias" in name:
snake_case_ = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
snake_case_ = 'weight'
elif "running_mean" in name:
snake_case_ = 'running_mean'
elif "inv_freq" in name:
snake_case_ = 'inv_freq'
elif "running_var" in name:
snake_case_ = 'running_var'
elif "num_batches_tracked" in name:
snake_case_ = 'num_batches_tracked'
else:
snake_case_ = None
set_recursively(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
continue
if not is_used:
unused_weights.append(UpperCAmelCase )
logger.warning(f'Unused weights: {unused_weights}' )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> str:
snake_case_ = full_name.split('conv_layers.' )[-1]
snake_case_ = name.split('.' )
snake_case_ = int(items[0] )
snake_case_ = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' )
snake_case_ = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' )
snake_case_ = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' )
snake_case_ = value
            logger.info(f'Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' )
snake_case_ = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(UpperCAmelCase )
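# Reading aid for load_conv_layer above: fairseq conv-layer keys look like
# "conv_layers.<layer_id>.<type_id>.{weight,bias}", where type_id 0 addresses
# the convolution itself and type_id 2 the group/layer norm; anything else is
# collected in unused_weights.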
@torch.no_grad()
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=True ) -> Tuple:
if config_path is not None:
snake_case_ = WavaVecaConformerConfig.from_pretrained(UpperCAmelCase , hidden_act='swish' )
else:
snake_case_ = WavaVecaConformerConfig()
if "rope" in checkpoint_path:
snake_case_ = 'rotary'
if is_finetuned:
if dict_path:
snake_case_ = Dictionary.load(UpperCAmelCase )
            # important: change bos & pad token ids since the CTC symbol is <pad>
            # and not <s> as in fairseq
snake_case_ = target_dict.pad_index
snake_case_ = target_dict.bos_index
snake_case_ = target_dict.eos_index
snake_case_ = len(target_dict.symbols )
snake_case_ = os.path.join(UpperCAmelCase , 'vocab.json' )
if not os.path.isdir(UpperCAmelCase ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(UpperCAmelCase ) )
return
os.makedirs(UpperCAmelCase , exist_ok=UpperCAmelCase )
snake_case_ = target_dict.indices
# fairseq has the <pad> and <s> switched
snake_case_ = 0
snake_case_ = 1
with open(UpperCAmelCase , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(UpperCAmelCase , UpperCAmelCase )
snake_case_ = WavaVecaCTCTokenizer(
UpperCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=UpperCAmelCase , )
snake_case_ = True if config.feat_extract_norm == 'layer' else False
snake_case_ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=UpperCAmelCase , return_attention_mask=UpperCAmelCase , )
snake_case_ = WavaVecaProcessor(feature_extractor=UpperCAmelCase , tokenizer=UpperCAmelCase )
processor.save_pretrained(UpperCAmelCase )
snake_case_ = WavaVecaConformerForCTC(UpperCAmelCase )
else:
snake_case_ = WavaVecaConformerForPreTraining(UpperCAmelCase )
if is_finetuned:
snake_case_ , snake_case_ , snake_case_ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
snake_case_ = argparse.Namespace(task='audio_pretraining' )
snake_case_ = fairseq.tasks.setup_task(UpperCAmelCase )
snake_case_ , snake_case_ , snake_case_ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=UpperCAmelCase )
snake_case_ = model[0].eval()
recursively_load_weights(UpperCAmelCase , UpperCAmelCase , not is_finetuned )
hf_wavavec.save_pretrained(UpperCAmelCase )
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
__UpperCamelCase = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
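# A hypothetical invocation of this conversion script (the script file name and
# all paths below are placeholders, not real files):
#
#   python convert_wav2vec2_conformer_checkpoint.py \
#       --checkpoint_path /path/to/fairseq_checkpoint.pt \
#       --pytorch_dump_folder_path ./converted_model \
#       --dict_path /path/to/dict.ltr.txt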
| 312
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class UpperCamelCase ( metaclass=lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = ["keras_nlp"]
def __init__( self, *lowerCAmelCase__, **lowerCAmelCase__) -> int:
requires_backends(self, ['keras_nlp'])
| 312
| 1
|
"""simple docstring"""
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_abit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
UpperCAmelCase__ : Union[str, Any] = logging.getLogger(__name__)
def lowercase_ ( _snake_case ,_snake_case ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = False ,):
SCREAMING_SNAKE_CASE__ : List[Any] = bnb_quantization_config.load_in_abit
SCREAMING_SNAKE_CASE__ : int = bnb_quantization_config.load_in_abit
    # guard for the 8-bit quantization path
    if load_in_abit and not is_abit_bnb_available():
        raise ImportError(
            """You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"""
            """ make sure you have the latest version of `bitsandbytes` installed.""" )
    # guard for the 4-bit quantization path
    if load_in_abit and not is_abit_bnb_available():
        raise ValueError(
            """You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"""
            """ make sure you have the latest version of `bitsandbytes` installed.""" )
SCREAMING_SNAKE_CASE__ : int = []
# custom device map
if isinstance(_snake_case ,_snake_case ) and len(device_map.keys() ) > 1:
SCREAMING_SNAKE_CASE__ : Optional[int] = [key for key, value in device_map.items() if value in ["""disk""", """cpu"""]]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
SCREAMING_SNAKE_CASE__ : int = get_keys_to_not_convert(_snake_case )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(_snake_case )
SCREAMING_SNAKE_CASE__ : List[Any] = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
SCREAMING_SNAKE_CASE__ : Dict = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(_snake_case )
# compatibility with peft
SCREAMING_SNAKE_CASE__ : Any = load_in_abit
SCREAMING_SNAKE_CASE__ : Any = load_in_abit
SCREAMING_SNAKE_CASE__ : Tuple = get_parameter_device(_snake_case )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
"""It is not recommended to quantize a loaded model. """
"""The model should be instantiated under the `init_empty_weights` context manager.""" )
SCREAMING_SNAKE_CASE__ : int = replace_with_bnb_layers(_snake_case ,_snake_case ,modules_to_not_convert=_snake_case )
# convert param to the right dtype
SCREAMING_SNAKE_CASE__ : str = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
SCREAMING_SNAKE_CASE__ : Tuple = name.replace(""".weight""" ,"""""" ).replace(""".bias""" ,"""""" )
SCREAMING_SNAKE_CASE__ : Dict = getattr(_snake_case ,_snake_case ,_snake_case )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(_snake_case ):
param.to(_snake_case )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info(
f'''The model device type is {model_device.type}. However, cuda is needed for quantization.'''
"""We move the model to cuda.""" )
return model
elif weights_location is None:
raise RuntimeError(
f'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''' )
else:
with init_empty_weights():
SCREAMING_SNAKE_CASE__ : Dict = replace_with_bnb_layers(
_snake_case ,_snake_case ,modules_to_not_convert=_snake_case )
SCREAMING_SNAKE_CASE__ : Optional[Any] = get_quantized_model_device_map(
_snake_case ,_snake_case ,_snake_case ,max_memory=_snake_case ,no_split_module_classes=_snake_case ,)
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
SCREAMING_SNAKE_CASE__ : Tuple = True
SCREAMING_SNAKE_CASE__ : Optional[Any] = any(x in list(device_map.values() ) for x in ["""cpu""", """disk"""] )
load_checkpoint_in_model(
_snake_case ,_snake_case ,_snake_case ,dtype=bnb_quantization_config.torch_dtype ,offload_folder=_snake_case ,offload_state_dict=_snake_case ,keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules ,offload_abit_bnb=load_in_abit and offload ,)
return dispatch_model(_snake_case ,device_map=_snake_case ,offload_dir=_snake_case )
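# A hedged usage sketch for the quantization entry point defined above (the
# BnbQuantizationConfig field name follows how it is read in this module and
# is an assumption):
#
#     config = BnbQuantizationConfig(load_in_abit=True)
#     model = lowercase_(model, config, weights_location="/path/to/weights")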
def lowercase_ ( _snake_case ,_snake_case ,_snake_case=None ,_snake_case=None ,_snake_case=None ):
if device_map is None:
if torch.cuda.is_available():
SCREAMING_SNAKE_CASE__ : int = {"""""": torch.cuda.current_device()}
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info("""The device_map was not initialized.""" """Setting device_map to `{'':torch.cuda.current_device()}`.""" )
if isinstance(_snake_case ,_snake_case ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
"""If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or """
"""'sequential'.""" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
SCREAMING_SNAKE_CASE__ : List[Any] = {}
SCREAMING_SNAKE_CASE__ : Union[str, Any] = special_dtypes
SCREAMING_SNAKE_CASE__ : Optional[Any] = no_split_module_classes
SCREAMING_SNAKE_CASE__ : int = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
SCREAMING_SNAKE_CASE__ : int = get_balanced_memory(
_snake_case ,low_zero=(device_map == """balanced_low_0""") ,max_memory=_snake_case ,**_snake_case ,)
SCREAMING_SNAKE_CASE__ : Optional[Any] = max_memory
SCREAMING_SNAKE_CASE__ : str = infer_auto_device_map(_snake_case ,**_snake_case )
if isinstance(_snake_case ,_snake_case ):
        # check that no quantized module ends up dispatched to the CPU or the disk
SCREAMING_SNAKE_CASE__ : Tuple = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
SCREAMING_SNAKE_CASE__ : Optional[Any] = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
"""
Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
these modules in `torch_dtype`, you need to pass a custom `device_map` to
`load_and_quantize_model`. Check
https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
for more details.
""" )
else:
logger.info(
"""Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit""" )
del device_map_without_some_modules
return device_map
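# Example of the device_map shapes handled above (module names illustrative):
# {"": 0} places the whole model on GPU 0, while a split such as
# {"transformer": 0, "lm_head": "cpu"} exercises the cpu/disk checks and, for
# 4-bit models, raises because quantized modules cannot be dispatched off-GPU.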
def lowercase_ ( _snake_case ,_snake_case ,_snake_case=None ,_snake_case=None ):
if modules_to_not_convert is None:
SCREAMING_SNAKE_CASE__ : Tuple = []
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = _replace_with_bnb_layers(
_snake_case ,_snake_case ,_snake_case ,_snake_case )
if not has_been_replaced:
logger.warning(
"""You are loading your model in 8bit or 4bit but no linear modules were found in your model."""
""" this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."""
""" Please double check your model architecture, or submit an issue on github if you think this is"""
""" a bug.""" )
return model
def lowercase_ ( _snake_case ,_snake_case ,_snake_case=None ,_snake_case=None ,):
SCREAMING_SNAKE_CASE__ : Tuple = False
for name, module in model.named_children():
if current_key_name is None:
SCREAMING_SNAKE_CASE__ : Any = []
current_key_name.append(_snake_case )
if isinstance(_snake_case ,nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
SCREAMING_SNAKE_CASE__ : Tuple = """.""".join(_snake_case )
SCREAMING_SNAKE_CASE__ : List[str] = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
SCREAMING_SNAKE_CASE__ : List[str] = False
break
if proceed:
                # Load a bnb module with empty weights and replace the `nn.Linear` module
if bnb_quantization_config.load_in_abit:
SCREAMING_SNAKE_CASE__ : Tuple = bnb.nn.LinearabitLt(
module.in_features ,module.out_features ,module.bias is not None ,has_fpaa_weights=_snake_case ,threshold=bnb_quantization_config.llm_inta_threshold ,)
elif bnb_quantization_config.load_in_abit:
SCREAMING_SNAKE_CASE__ : Dict = bnb.nn.Linearabit(
module.in_features ,module.out_features ,module.bias is not None ,bnb_quantization_config.bnb_abit_compute_dtype ,compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant ,quant_type=bnb_quantization_config.bnb_abit_quant_type ,)
else:
raise ValueError("""load_in_8bit and load_in_4bit can't be both False""" )
SCREAMING_SNAKE_CASE__ : str = module.weight.data
if module.bias is not None:
SCREAMING_SNAKE_CASE__ : Optional[int] = module.bias.data
bnb_module.requires_grad_(_snake_case )
setattr(_snake_case ,_snake_case ,_snake_case )
SCREAMING_SNAKE_CASE__ : List[str] = True
if len(list(module.children() ) ) > 0:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = _replace_with_bnb_layers(
_snake_case ,_snake_case ,_snake_case ,_snake_case )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def lowercase_ ( _snake_case ):
# Create a copy of the model
with init_empty_weights():
        SCREAMING_SNAKE_CASE__ : Any = deepcopy(_snake_case )  # this has 0 cost since it is done inside the `init_empty_weights` context manager
SCREAMING_SNAKE_CASE__ : Tuple = find_tied_parameters(_snake_case )
# For compatibility with Accelerate < 0.18
if isinstance(_snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : Tuple = sum(list(tied_params.values() ) ,[] ) + list(tied_params.keys() )
else:
SCREAMING_SNAKE_CASE__ : List[str] = sum(_snake_case ,[] )
SCREAMING_SNAKE_CASE__ : Dict = len(_snake_case ) > 0
# Check if it is a base model
SCREAMING_SNAKE_CASE__ : Optional[int] = False
if hasattr(_snake_case ,"""base_model_prefix""" ):
SCREAMING_SNAKE_CASE__ : Dict = not hasattr(_snake_case ,model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
SCREAMING_SNAKE_CASE__ : Optional[Any] = list(model.named_children() )
SCREAMING_SNAKE_CASE__ : Optional[Any] = [list_modules[-1][0]]
# add last module together with tied weights
SCREAMING_SNAKE_CASE__ : List[str] = set(_snake_case ) - set(_snake_case )
SCREAMING_SNAKE_CASE__ : Tuple = list(set(_snake_case ) ) + list(_snake_case )
# remove ".weight" from the keys
SCREAMING_SNAKE_CASE__ : Tuple = [""".weight""", """.bias"""]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = name.replace(_snake_case ,"""""" )
filtered_module_names.append(_snake_case )
return filtered_module_names
def lowercase_ ( _snake_case ):
for m in model.modules():
if isinstance(_snake_case ,bnb.nn.Linearabit ):
return True
return False
def lowercase_ ( _snake_case ):
return next(parameter.parameters() ).device
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ):
# if it is not quantized, we quantize and offload the quantized weights and the SCB stats
if fpaa_statistics is None:
set_module_tensor_to_device(_snake_case ,_snake_case ,0 ,dtype=_snake_case ,value=_snake_case )
SCREAMING_SNAKE_CASE__ : str = param_name
SCREAMING_SNAKE_CASE__ : Dict = model
if "." in tensor_name:
SCREAMING_SNAKE_CASE__ : Any = tensor_name.split(""".""" )
for split in splits[:-1]:
SCREAMING_SNAKE_CASE__ : List[str] = getattr(_snake_case ,_snake_case )
if new_module is None:
raise ValueError(f'''{module} has no attribute {split}.''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] = new_module
SCREAMING_SNAKE_CASE__ : List[Any] = splits[-1]
# offload weights
SCREAMING_SNAKE_CASE__ : List[Any] = False
offload_weight(module._parameters[tensor_name] ,_snake_case ,_snake_case ,index=_snake_case )
if hasattr(module._parameters[tensor_name] ,"""SCB""" ):
offload_weight(
module._parameters[tensor_name].SCB ,param_name.replace("""weight""" ,"""SCB""" ) ,_snake_case ,index=_snake_case ,)
else:
offload_weight(_snake_case ,_snake_case ,_snake_case ,index=_snake_case )
offload_weight(_snake_case ,param_name.replace("""weight""" ,"""SCB""" ) ,_snake_case ,index=_snake_case )
set_module_tensor_to_device(_snake_case ,_snake_case ,"""meta""" ,dtype=_snake_case ,value=torch.empty(*param.size() ) )
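# Reading aid: for int8 weights quantized by bitsandbytes, the per-row scaling
# constants live in an `SCB` attribute on the parameter; the branches above
# offload that tensor next to the weight under the key obtained via
# param_name.replace("weight", "SCB").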
| 25
|
"""simple docstring"""
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
UpperCAmelCase__ : Union[str, Any] = logging.get_logger(__name__)
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ):
return [
int(1_000 * (box[0] / width) ),
int(1_000 * (box[1] / height) ),
int(1_000 * (box[2] / width) ),
int(1_000 * (box[3] / height) ),
]
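# Worked example for the normalization above (values chosen for illustration):
# for a 200x100 image, the pixel box [20, 10, 40, 30] maps to
# [100, 100, 200, 300] on the 0-1000 grid that LayoutLM-style models expect:
#   1000 * 20 / 200 = 100,  1000 * 10 / 100 = 100,
#   1000 * 40 / 200 = 200,  1000 * 30 / 100 = 300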
def lowercase_ ( _snake_case ,_snake_case ,_snake_case = None ):
SCREAMING_SNAKE_CASE__ : Dict = tesseract_config if tesseract_config is not None else """"""
# apply OCR
SCREAMING_SNAKE_CASE__ : List[Any] = to_pil_image(_snake_case )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = pil_image.size
SCREAMING_SNAKE_CASE__ : Tuple = pytesseract.image_to_data(_snake_case ,lang=_snake_case ,output_type="""dict""" ,config=_snake_case )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = data["""text"""], data["""left"""], data["""top"""], data["""width"""], data["""height"""]
# filter empty words and corresponding coordinates
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [idx for idx, word in enumerate(_snake_case ) if not word.strip()]
SCREAMING_SNAKE_CASE__ : Dict = [word for idx, word in enumerate(_snake_case ) if idx not in irrelevant_indices]
SCREAMING_SNAKE_CASE__ : List[str] = [coord for idx, coord in enumerate(_snake_case ) if idx not in irrelevant_indices]
SCREAMING_SNAKE_CASE__ : Tuple = [coord for idx, coord in enumerate(_snake_case ) if idx not in irrelevant_indices]
SCREAMING_SNAKE_CASE__ : int = [coord for idx, coord in enumerate(_snake_case ) if idx not in irrelevant_indices]
SCREAMING_SNAKE_CASE__ : Tuple = [coord for idx, coord in enumerate(_snake_case ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
SCREAMING_SNAKE_CASE__ : List[Any] = []
for x, y, w, h in zip(_snake_case ,_snake_case ,_snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = [x, y, x + w, y + h]
actual_boxes.append(_snake_case )
# finally, normalize the bounding boxes
SCREAMING_SNAKE_CASE__ : List[str] = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(_snake_case ,_snake_case ,_snake_case ) )
assert len(_snake_case ) == len(_snake_case ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
__UpperCamelCase : Optional[int] = ['''pixel_values''']
def __init__(self , SCREAMING_SNAKE_CASE__ = True , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE__ = True , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = "" , **SCREAMING_SNAKE_CASE__ , ) -> None:
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : List[Any] = size if size is not None else {"""height""": 2_24, """width""": 2_24}
SCREAMING_SNAKE_CASE__ : List[Any] = get_size_dict(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = do_resize
SCREAMING_SNAKE_CASE__ : Any = size
SCREAMING_SNAKE_CASE__ : List[Any] = resample
SCREAMING_SNAKE_CASE__ : Dict = apply_ocr
SCREAMING_SNAKE_CASE__ : List[str] = ocr_lang
SCREAMING_SNAKE_CASE__ : Tuple = tesseract_config
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE__ = None , **SCREAMING_SNAKE_CASE__ , ) -> np.ndarray:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = get_size_dict(SCREAMING_SNAKE_CASE__ )
if "height" not in size or "width" not in size:
raise ValueError(F'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
SCREAMING_SNAKE_CASE__ : Any = (size["""height"""], size["""width"""])
return resize(SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , resample=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE__ , ) -> PIL.Image.Image:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE__ : Union[str, Any] = size if size is not None else self.size
SCREAMING_SNAKE_CASE__ : Dict = get_size_dict(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : str = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE__ : Optional[Any] = apply_ocr if apply_ocr is not None else self.apply_ocr
SCREAMING_SNAKE_CASE__ : Optional[Any] = ocr_lang if ocr_lang is not None else self.ocr_lang
SCREAMING_SNAKE_CASE__ : Dict = tesseract_config if tesseract_config is not None else self.tesseract_config
SCREAMING_SNAKE_CASE__ : Optional[int] = make_list_of_images(SCREAMING_SNAKE_CASE__ )
if not valid_images(SCREAMING_SNAKE_CASE__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [to_numpy_array(SCREAMING_SNAKE_CASE__ ) for image in images]
if apply_ocr:
requires_backends(self , """pytesseract""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = []
SCREAMING_SNAKE_CASE__ : Dict = []
for image in images:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = apply_tesseract(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
words_batch.append(SCREAMING_SNAKE_CASE__ )
boxes_batch.append(SCREAMING_SNAKE_CASE__ )
if do_resize:
SCREAMING_SNAKE_CASE__ : Optional[int] = [self.resize(image=SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , resample=SCREAMING_SNAKE_CASE__ ) for image in images]
# flip color channels from RGB to BGR (as Detectron2 requires this)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [flip_channel_order(SCREAMING_SNAKE_CASE__ ) for image in images]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [to_channel_dimension_format(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for image in images]
SCREAMING_SNAKE_CASE__ : Optional[Any] = BatchFeature(data={"""pixel_values""": images} , tensor_type=SCREAMING_SNAKE_CASE__ )
if apply_ocr:
SCREAMING_SNAKE_CASE__ : List[Any] = words_batch
SCREAMING_SNAKE_CASE__ : List[str] = boxes_batch
return data
| 25
| 1
|
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A : Optional[Any] = logging.get_logger(__name__)
A : Tuple = {
"BridgeTower/bridgetower-base": "https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json",
"BridgeTower/bridgetower-base-itm-mlm": (
"https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json"
),
}
class _UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : Any ="""bridgetower_vision_model"""
def __init__( self , __a=7_68 , __a=12 , __a=3 , __a=16 , __a=2_88 , __a=1 , __a=1e-0_5 , __a=False , __a=True , __a=False , **__a , ):
super().__init__(**__a )
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_channels
__lowerCAmelCase = patch_size
__lowerCAmelCase = image_size
__lowerCAmelCase = initializer_factor
__lowerCAmelCase = layer_norm_eps
__lowerCAmelCase = stop_gradient
__lowerCAmelCase = share_layernorm
__lowerCAmelCase = remove_last_layer
@classmethod
def snake_case ( cls , __a , **__a ):
__lowerCAmelCase , __lowerCAmelCase = cls.get_config_dict(__a , **__a )
if config_dict.get("model_type" ) == "bridgetower":
__lowerCAmelCase = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(__a , **__a )
class _UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : List[Any] ="""bridgetower_text_model"""
def __init__( self , __a=5_02_65 , __a=7_68 , __a=12 , __a=12 , __a=1 , __a=30_72 , __a="gelu" , __a=0.1 , __a=0.1 , __a=5_14 , __a=1 , __a=1e-0_5 , __a=1 , __a=0 , __a=2 , __a="absolute" , __a=True , **__a , ):
super().__init__(**__a )
__lowerCAmelCase = vocab_size
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = hidden_act
__lowerCAmelCase = initializer_factor
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = max_position_embeddings
__lowerCAmelCase = type_vocab_size
__lowerCAmelCase = layer_norm_eps
__lowerCAmelCase = position_embedding_type
__lowerCAmelCase = use_cache
__lowerCAmelCase = pad_token_id
__lowerCAmelCase = bos_token_id
__lowerCAmelCase = eos_token_id
@classmethod
def snake_case ( cls , __a , **__a ):
__lowerCAmelCase , __lowerCAmelCase = cls.get_config_dict(__a , **__a )
if config_dict.get("model_type" ) == "bridgetower":
__lowerCAmelCase = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(__a , **__a )
class _UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] ="""bridgetower"""
def __init__( self , __a=True , __a="gelu" , __a=7_68 , __a=1 , __a=1e-0_5 , __a=False , __a="add" , __a=12 , __a=6 , __a=False , __a=False , __a=None , __a=None , **__a , ):
# TODO: remove this once the Hub files are updated.
__lowerCAmelCase = kwargs.pop("text_config_dict" , __a )
__lowerCAmelCase = kwargs.pop("vision_config_dict" , __a )
super().__init__(**__a )
__lowerCAmelCase = share_cross_modal_transformer_layers
__lowerCAmelCase = hidden_act
__lowerCAmelCase = hidden_size
__lowerCAmelCase = initializer_factor
__lowerCAmelCase = layer_norm_eps
__lowerCAmelCase = share_link_tower_layers
__lowerCAmelCase = link_tower_type
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = tie_word_embeddings
__lowerCAmelCase = init_layernorm_from_vision_encoder
if text_config is None:
__lowerCAmelCase = {}
logger.info("`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values." )
if vision_config is None:
__lowerCAmelCase = {}
logger.info("`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values." )
__lowerCAmelCase = BridgeTowerTextConfig(**__a )
__lowerCAmelCase = BridgeTowerVisionConfig(**__a )
@classmethod
def snake_case ( cls , __a , __a , **__a ):
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__a )
def snake_case ( self ):
__lowerCAmelCase = copy.deepcopy(self.__dict__ )
__lowerCAmelCase = self.text_config.to_dict()
__lowerCAmelCase = self.vision_config.to_dict()
__lowerCAmelCase = self.__class__.model_type
return output
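# Hedged sketch of composing the combined config from its sub-configs via the
# classmethod above (named from_text_vision_configs in the unmangled
# transformers API -- the name is an assumption here):
#
#     text_cfg = BridgeTowerTextConfig()
#     vision_cfg = BridgeTowerVisionConfig()
#     combined_cfg = BridgeTowerConfig.from_text_vision_configs(text_cfg, vision_cfg)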
| 355
|
"""simple docstring"""
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
A : Any = "bert-base-cased"
A : Any = "google/pegasus-xsum"
A : Union[str, Any] = [" Sam ate lunch today.", "Sams lunch ingredients."]
A : Union[str, Any] = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"]
A : Optional[int] = "patrickvonplaten/t5-tiny-random"
A : int = "sshleifer/bart-tiny-random"
A : Optional[int] = "sshleifer/tiny-mbart"
A : Any = "sshleifer/tiny-marian-en-de"
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = "\n".join(_UpperCamelCase )
Path(_UpperCamelCase ).open("w" ).writelines(_UpperCamelCase )
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
for split in ["train", "val", "test"]:
_dump_articles(os.path.join(_UpperCamelCase , f"{split}.source" ) , _UpperCamelCase )
_dump_articles(os.path.join(_UpperCamelCase , f"{split}.target" ) , _UpperCamelCase )
return tmp_dir
class _UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
def snake_case ( self , __a ):
__lowerCAmelCase = AutoTokenizer.from_pretrained(__a )
__lowerCAmelCase = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
__lowerCAmelCase = max(len(tokenizer.encode(__a ) ) for a in ARTICLES )
__lowerCAmelCase = max(len(tokenizer.encode(__a ) ) for a in SUMMARIES )
__lowerCAmelCase = 4
__lowerCAmelCase = 8
assert max_len_target > max_src_len # Will be truncated
assert max_len_source > max_src_len # Will be truncated
__lowerCAmelCase , __lowerCAmelCase = "ro_RO", "de_DE" # ignored for all but mbart, but never causes error.
__lowerCAmelCase = SeqaSeqDataset(
__a , data_dir=__a , type_path="train" , max_source_length=__a , max_target_length=__a , src_lang=__a , tgt_lang=__a , )
__lowerCAmelCase = DataLoader(__a , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert isinstance(__a , __a )
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
__lowerCAmelCase = shift_tokens_right(batch["labels"] , tokenizer.pad_token_id )
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
def snake_case ( self , __a ):
__lowerCAmelCase = AutoTokenizer.from_pretrained(__a )
__lowerCAmelCase = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
__lowerCAmelCase = max(len(tokenizer.encode(__a ) ) for a in ARTICLES )
__lowerCAmelCase = max(len(tokenizer.encode(__a ) ) for a in SUMMARIES )
__lowerCAmelCase = 4
__lowerCAmelCase = LegacySeqaSeqDataset(
__a , data_dir=__a , type_path="train" , max_source_length=20 , max_target_length=__a , )
__lowerCAmelCase = DataLoader(__a , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 20 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
def snake_case ( self ):
__lowerCAmelCase = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25" )
__lowerCAmelCase = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
__lowerCAmelCase = tmp_dir.joinpath("train.source" ).open().readlines()
__lowerCAmelCase = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
pack_data_dir(__a , __a , 1_28 , __a )
__lowerCAmelCase = {x.name for x in tmp_dir.iterdir()}
__lowerCAmelCase = {x.name for x in save_dir.iterdir()}
__lowerCAmelCase = save_dir.joinpath("train.source" ).open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(__a ) < len(__a )
assert len(__a ) == 1
assert len(packed_examples[0] ) == sum(len(__a ) for x in orig_examples )
assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason="This test requires fairseq" )
def snake_case ( self ):
if not FAIRSEQ_AVAILABLE:
return
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self._get_dataset(max_len=64 )
__lowerCAmelCase = 64
__lowerCAmelCase = ds.make_dynamic_sampler(__a , required_batch_size_multiple=__a )
__lowerCAmelCase = [len(__a ) for x in batch_sampler]
assert len(set(__a ) ) > 1 # it's not dynamic batch size if every batch is the same length
assert sum(__a ) == len(__a ) # no dropped or added examples
__lowerCAmelCase = DataLoader(__a , batch_sampler=__a , collate_fn=ds.collate_fn , num_workers=2 )
__lowerCAmelCase = []
__lowerCAmelCase = []
for batch in data_loader:
__lowerCAmelCase = batch["input_ids"].shape
__lowerCAmelCase = src_shape[0]
assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
            __lowerCAmelCase = np.prod(batch["input_ids"].shape )
num_src_per_batch.append(__a )
if num_src_tokens > (max_tokens * 1.1):
failures.append(__a )
assert num_src_per_batch[0] == max(__a )
if failures:
raise AssertionError(f"too many tokens in {len(__a )} batches" )
def snake_case ( self ):
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self._get_dataset(max_len=5_12 )
__lowerCAmelCase = 2
__lowerCAmelCase = ds.make_sortish_sampler(__a , shuffle=__a )
__lowerCAmelCase = DataLoader(__a , batch_size=__a , collate_fn=ds.collate_fn , num_workers=2 )
__lowerCAmelCase = DataLoader(__a , batch_size=__a , collate_fn=ds.collate_fn , num_workers=2 , sampler=__a )
__lowerCAmelCase = tokenizer.pad_token_id
def count_pad_tokens(__a , __a="input_ids" ):
return [batch[k].eq(__a ).sum().item() for batch in data_loader]
assert sum(count_pad_tokens(__a , k="labels" ) ) < sum(count_pad_tokens(__a , k="labels" ) )
assert sum(count_pad_tokens(__a ) ) < sum(count_pad_tokens(__a ) )
assert len(__a ) == len(__a )
def snake_case ( self , __a=10_00 , __a=1_28 ):
if os.getenv("USE_REAL_DATA" , __a ):
__lowerCAmelCase = "examples/seq2seq/wmt_en_ro"
__lowerCAmelCase = max_len * 2 * 64
if not Path(__a ).joinpath("train.len" ).exists():
save_len_file(__a , __a )
else:
__lowerCAmelCase = "examples/seq2seq/test_data/wmt_en_ro"
__lowerCAmelCase = max_len * 4
save_len_file(__a , __a )
__lowerCAmelCase = AutoTokenizer.from_pretrained(__a )
__lowerCAmelCase = SeqaSeqDataset(
__a , data_dir=__a , type_path="train" , max_source_length=__a , max_target_length=__a , n_obs=__a , )
return ds, max_tokens, tokenizer
def snake_case ( self ):
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self._get_dataset()
__lowerCAmelCase = set(DistributedSortishSampler(__a , 2_56 , num_replicas=2 , rank=0 , add_extra_examples=__a ) )
__lowerCAmelCase = set(DistributedSortishSampler(__a , 2_56 , num_replicas=2 , rank=1 , add_extra_examples=__a ) )
assert idsa.intersection(__a ) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
def snake_case ( self , __a ):
__lowerCAmelCase = AutoTokenizer.from_pretrained(__a , use_fast=__a )
if tok_name == MBART_TINY:
__lowerCAmelCase = SeqaSeqDataset(
__a , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="train" , max_source_length=4 , max_target_length=8 , src_lang="EN" , tgt_lang="FR" , )
__lowerCAmelCase = train_dataset.dataset_kwargs
assert "src_lang" in kwargs and "tgt_lang" in kwargs
else:
__lowerCAmelCase = SeqaSeqDataset(
__a , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="train" , max_source_length=4 , max_target_length=8 , )
__lowerCAmelCase = train_dataset.dataset_kwargs
assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
assert len(__a ) == 1 if tok_name == BART_TINY else len(__a ) == 0
| 259
| 0
|
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=lowerCamelCase__ )
class __UpperCAmelCase ( lowerCamelCase__ ):
UpperCamelCase = field(default="""automatic-speech-recognition""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
UpperCamelCase = Features({"""audio""": Audio()} )
UpperCamelCase = Features({"""transcription""": Value("""string""" )} )
UpperCamelCase = "audio"
UpperCamelCase = "transcription"
def __magic_name__ ( self : Union[str, Any], __A : str ):
if self.audio_column not in features:
raise ValueError(F'''Column {self.audio_column} is not present in features.''' )
if not isinstance(features[self.audio_column], __A ):
raise ValueError(F'''Column {self.audio_column} is not an Audio type.''' )
UpperCAmelCase : List[Any] = copy.deepcopy(self )
UpperCAmelCase : str = self.input_schema.copy()
UpperCAmelCase : Tuple = features[self.audio_column]
UpperCAmelCase : List[str] = input_schema
return task_template
@property
def __magic_name__ ( self : List[Any] ):
return {self.audio_column: "audio", self.transcription_column: "transcription"}
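# Reading aid: the frozen template above pins an ASR example's schema to
# {"audio": Audio()} -> {"transcription": Value("string")}; the first method
# (align_with_features in the unmangled datasets API -- name assumed) swaps in
# the dataset's own Audio feature, e.g. one with a custom sampling rate, and
# the property exposes the column -> role mapping used when casting.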
| 336
|
def partition( m : int ) -> int:
    # memo[n][k] counts the partitions of n whose parts are all of size at most k + 1
    memo : list[list[int]] = [[0 for _ in range(m )] for _ in range(m + 1 )]
    for i in range(m + 1 ):
        memo[i][0] = 1
    for n in range(m + 1 ):
        for k in range(1 , m ):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]
    return memo[m][m - 1]
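# Quick illustrative check of the recurrence: partition(5) == 7, matching the
# seven integer partitions of 5:
#   5, 4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1, 1+1+1+1+1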
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
            n = int(input("Enter a number: ").strip())
print(partition(n))
except ValueError:
print("Please enter a number.")
else:
try:
            n = int(sys.argv[1])
print(partition(n))
except ValueError:
print("Please pass a number.")
| 336
| 1
|
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ):
try:
with open(_A , "rb" ) as flax_state_f:
A : List[str] = from_bytes(_A , flax_state_f.read() )
except UnpicklingError as e:
try:
with open(_A ) as f:
if f.read().startswith("version" ):
raise OSError(
"You seem to have cloned a repository without having git-lfs installed. Please"
" install git-lfs and run `git lfs install` followed by `git lfs pull` in the"
" folder you cloned." )
else:
raise ValueError from e
except (UnicodeDecodeError, ValueError):
raise EnvironmentError(f"""Unable to convert {model_file} to Flax deserializable object. """ )
return load_flax_weights_in_pytorch_model(_A , _A )
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase ):
try:
import torch # noqa: F401
except ImportError:
logger.error(
"Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"
" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
" instructions." )
raise
# check if we have bf16 weights
A : Tuple = flatten_dict(jax.tree_util.tree_map(lambda _lowerCamelCase : x.dtype == jnp.bfloataa , _A ) ).values()
if any(_A ):
# convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
"Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
"before loading those in PyTorch model." )
A : int = jax.tree_util.tree_map(
lambda _lowerCamelCase : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , _A )
A : str = ''
A : Optional[int] = flatten_dict(_A , sep="." )
A : Optional[Any] = pt_model.state_dict()
# keep track of unexpected & missing keys
A : int = []
A : Dict = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
A : Union[str, Any] = flax_key_tuple.split("." )
if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
A : List[Any] = flax_key_tuple_array[:-1] + ['weight']
A : Optional[int] = jnp.transpose(_A , (3, 2, 0, 1) )
elif flax_key_tuple_array[-1] == "kernel":
A : str = flax_key_tuple_array[:-1] + ['weight']
A : int = flax_tensor.T
elif flax_key_tuple_array[-1] == "scale":
A : List[str] = flax_key_tuple_array[:-1] + ['weight']
if "time_embedding" not in flax_key_tuple_array:
for i, flax_key_tuple_string in enumerate(_A ):
A : Dict = (
flax_key_tuple_string.replace("_0" , ".0" )
.replace("_1" , ".1" )
.replace("_2" , ".2" )
.replace("_3" , ".3" )
.replace("_4" , ".4" )
.replace("_5" , ".5" )
.replace("_6" , ".6" )
.replace("_7" , ".7" )
.replace("_8" , ".8" )
.replace("_9" , ".9" )
)
A : List[Any] = '.'.join(_A )
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
f"""Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected """
f"""to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
else:
# add weight to pytorch dict
A : List[Any] = np.asarray(_A ) if not isinstance(_A , np.ndarray ) else flax_tensor
A : Union[str, Any] = torch.from_numpy(_A )
# remove from missing keys
missing_keys.remove(_A )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(_A )
pt_model.load_state_dict(_A )
# re-transform missing_keys to list
A : Any = list(_A )
if len(_A ) > 0:
logger.warning(
"Some weights of the Flax model were not used when initializing the PyTorch model"
f""" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"""
f""" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"""
" (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
f""" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"""
" to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
" FlaxBertForSequenceClassification model)." )
if len(_A ) > 0:
logger.warning(
f"""Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"""
f""" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"""
" use it for predictions and inference." )
return pt_model
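# Note on the transposes above (shape conventions, not new logic): Flax conv
# kernels are stored as (H, W, in_channels, out_channels) while PyTorch expects
# (out_channels, in_channels, H, W), hence the (3, 2, 0, 1) transpose; Flax
# dense "kernel" matrices are (in, out) and become PyTorch "weight" via .T; a
# Flax norm "scale" maps onto the PyTorch normalization layer's "weight".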
| 358
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE = {
"""microsoft/resnet-50""": """https://huggingface.co/microsoft/resnet-50/blob/main/config.json""",
}
class lowerCamelCase_ ( _A ,_A ):
'''simple docstring'''
a__ = "resnet"
a__ = ["basic", "bottleneck"]
def __init__( self : Tuple , __lowerCamelCase : int=3 , __lowerCamelCase : Optional[int]=64 , __lowerCamelCase : Union[str, Any]=[2_56, 5_12, 10_24, 20_48] , __lowerCamelCase : Tuple=[3, 4, 6, 3] , __lowerCamelCase : Optional[Any]="bottleneck" , __lowerCamelCase : Dict="relu" , __lowerCamelCase : Tuple=False , __lowerCamelCase : List[Any]=None , __lowerCamelCase : Tuple=None , **__lowerCamelCase : Tuple , ) -> Optional[Any]:
super().__init__(**__lowerCamelCase )
if layer_type not in self.layer_types:
raise ValueError(F"""layer_type={layer_type} is not one of {",".join(self.layer_types )}""" )
A : Any = num_channels
A : Union[str, Any] = embedding_size
A : Any = hidden_sizes
A : List[str] = depths
A : Union[str, Any] = layer_type
A : Any = hidden_act
A : Any = downsample_in_first_stage
A : Any = ["stem"] + [F"""stage{idx}""" for idx in range(1 , len(__lowerCamelCase ) + 1 )]
A , A : int = get_aligned_output_features_output_indices(
out_features=__lowerCamelCase , out_indices=__lowerCamelCase , stage_names=self.stage_names )
class lowerCamelCase_ ( _A ):
'''simple docstring'''
a__ = version.parse("1.11" )
@property
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def SCREAMING_SNAKE_CASE__ ( self : str ) -> float:
return 1e-3
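# Reading aid for the ONNX config above: every axis of pixel_values (batch,
# num_channels, height, width) is declared dynamic for export, and 1e-3 is the
# absolute tolerance used when the exported graph is validated against the
# eager model.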
| 256
| 0
|
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
def lowerCAmelCase__ ( _UpperCamelCase : Optional[int] , _UpperCamelCase : List[Any] , _UpperCamelCase : Dict , _UpperCamelCase : List[str] , _UpperCamelCase : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
for attribute in key.split('.' ):
snake_case = getattr(_UpperCamelCase , _UpperCamelCase )
if weight_type is not None:
snake_case = getattr(_UpperCamelCase , _UpperCamelCase ).shape
else:
snake_case = hf_pointer.shape
assert hf_shape == value.shape, (
f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
snake_case = value
elif weight_type == "weight_g":
snake_case = value
elif weight_type == "weight_v":
snake_case = value
elif weight_type == "bias":
snake_case = value
else:
snake_case = value
logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def lowerCAmelCase__ ( _UpperCamelCase : List[Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[Any] ) -> List[Any]:
"""simple docstring"""
snake_case = []
snake_case = fairseq_model.state_dict()
snake_case = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
snake_case = False
if "conv_layers" in name:
load_conv_layer(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , hf_model.config.feat_extract_norm == 'group' , )
snake_case = True
else:
for key, mapped_key in MAPPING.items():
snake_case = 'hubert.' + mapped_key if (is_finetuned and mapped_key != 'lm_head') else mapped_key
if key in name or (key.split('w2v_model.' )[-1] == name.split('.' )[0] and not is_finetuned):
snake_case = True
if "*" in mapped_key:
snake_case = name.split(_UpperCamelCase )[0].split('.' )[-2]
snake_case = mapped_key.replace('*' , _UpperCamelCase )
if "weight_g" in name:
snake_case = 'weight_g'
elif "weight_v" in name:
snake_case = 'weight_v'
elif "weight" in name:
snake_case = 'weight'
elif "bias" in name:
snake_case = 'bias'
else:
snake_case = None
set_recursively(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
continue
if not is_used:
unused_weights.append(_UpperCamelCase )
logger.warning(f"""Unused weights: {unused_weights}""" )
def lowerCAmelCase__ ( _UpperCamelCase : Dict , _UpperCamelCase : Dict , _UpperCamelCase : Tuple , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Any ) -> Any:
"""simple docstring"""
snake_case = full_name.split('conv_layers.' )[-1]
snake_case = name.split('.' )
snake_case = int(items[0] )
snake_case = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
snake_case = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
snake_case = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
snake_case = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
snake_case = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(_UpperCamelCase )
@torch.no_grad()
def lowerCAmelCase__ ( _UpperCamelCase : str , _UpperCamelCase : Any , _UpperCamelCase : List[Any]=None , _UpperCamelCase : Any=None , _UpperCamelCase : Union[str, Any]=True ) -> List[Any]:
"""simple docstring"""
if config_path is not None:
snake_case = HubertConfig.from_pretrained(_UpperCamelCase )
else:
snake_case = HubertConfig()
if is_finetuned:
if dict_path:
snake_case = Dictionary.load(_UpperCamelCase )
            # important: change bos & pad token ids since the CTC symbol is <pad>
            # and not <s> as in fairseq
snake_case = target_dict.pad_index
snake_case = target_dict.bos_index
snake_case = target_dict.eos_index
snake_case = len(target_dict.symbols )
snake_case = os.path.join(_UpperCamelCase , 'vocab.json' )
if not os.path.isdir(_UpperCamelCase ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(_UpperCamelCase ) )
return
os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(target_dict.indices , _UpperCamelCase )
snake_case = WavaVecaCTCTokenizer(
_UpperCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=_UpperCamelCase , )
snake_case = True if config.feat_extract_norm == 'layer' else False
snake_case = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=_UpperCamelCase , return_attention_mask=_UpperCamelCase , )
snake_case = WavaVecaProcessor(feature_extractor=_UpperCamelCase , tokenizer=_UpperCamelCase )
processor.save_pretrained(_UpperCamelCase )
snake_case = HubertForCTC(_UpperCamelCase )
else:
snake_case = HubertModel(_UpperCamelCase )
if is_finetuned:
snake_case ,snake_case ,snake_case = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
snake_case ,snake_case ,snake_case = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
snake_case = model[0].eval()
recursively_load_weights(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
hf_wavavec.save_pretrained(_UpperCamelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
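
# Illustrative invocation (script and file names are placeholders):
#   python convert_hubert_checkpoint.py --checkpoint_path ./hubert_base_ls960.pt \
#       --pytorch_dump_folder_path ./hubert-base --dict_path ./dict.ltr.txt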
"""simple docstring"""
import itertools
import string
from collections.abc import Generator, Iterable
def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    """Yield successive ``size``-length tuples from ``seq``."""
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk


def prepare_input(dirty: str) -> str:
    """Upper-case the input, drop non-letters and pad repeated letters with X."""
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""

    if len(dirty) < 2:
        return dirty

    for i in range(len(dirty) - 1):
        clean += dirty[i]

        if dirty[i] == dirty[i + 1]:
            clean += "X"

    clean += dirty[-1]

    if len(clean) & 1:
        clean += "X"

    return clean
def generate_table(key: str) -> list[str]:
    """Build the 25-character Playfair key table for ``key``."""
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"

    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []

    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)

    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)

    return table
def encode(plaintext: str, key: str) -> str:
    """Encode ``plaintext`` with the Playfair cipher using ``key``."""
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]

    return ciphertext
def decode(ciphertext: str, key: str) -> str:
    """Decode ``ciphertext`` with the Playfair cipher using ``key``."""
    table = generate_table(key)
    plaintext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]

    return plaintext
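

if __name__ == "__main__":
    # Round-trip sketch; the key and message are illustrative.
    key = "MONARCHY"
    encoded = encode("HIDE THE GOLD", key)
    print(encoded)
    print(decode(encoded, key))  # should print the prepared plaintext, "HIDETHEGOLDX"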
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_maskformer_config(model_name: str):
    backbone_config = SwinConfig.from_pretrained(
        "microsoft/swin-tiny-patch4-window7-224", out_features=["stage1", "stage2", "stage3", "stage4"] )
    config = MaskFormerConfig(backbone_config=backbone_config)

    repo_id = "huggingface/label-files"
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = "maskformer-ade20k-full-id2label.json"
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = "ade20k-id2label.json"
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = "maskformer-coco-stuff-id2label.json"
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = "coco-panoptic-id2label.json"
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = "cityscapes-id2label.json"
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = "mapillary-vistas-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('''backbone.patch_embed.proj.weight''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.patch_embed.proj.bias''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.patch_embed.norm.weight''', '''model.pixel_level_module.encoder.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.patch_embed.norm.bias''', '''model.pixel_level_module.encoder.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.norm1.weight', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.norm1.bias', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.attn.relative_position_index', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.attn.proj.weight', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.attn.proj.bias', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.norm2.weight', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.norm2.bias', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.mlp.fc1.weight', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.mlp.fc1.bias', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.mlp.fc2.weight', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.mlp.fc2.bias', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((f'backbone.layers.{i}.downsample.reduction.weight', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((f'backbone.layers.{i}.downsample.norm.weight', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((f'backbone.layers.{i}.downsample.norm.bias', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append((f'backbone.norm{i}.weight', f'model.pixel_level_module.encoder.hidden_states_norms.{i}.weight') )
rename_keys.append((f'backbone.norm{i}.bias', f'model.pixel_level_module.encoder.hidden_states_norms.{i}.bias') )
# FPN
rename_keys.append(('''sem_seg_head.layer_4.weight''', '''model.pixel_level_module.decoder.fpn.stem.0.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.weight''', '''model.pixel_level_module.decoder.fpn.stem.1.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.bias''', '''model.pixel_level_module.decoder.fpn.stem.1.bias''') )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((f'sem_seg_head.adapter_{source_index}.weight', f'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight') )
rename_keys.append((f'sem_seg_head.adapter_{source_index}.norm.weight', f'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight') )
rename_keys.append((f'sem_seg_head.adapter_{source_index}.norm.bias', f'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias') )
rename_keys.append((f'sem_seg_head.layer_{source_index}.weight', f'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight') )
rename_keys.append((f'sem_seg_head.layer_{source_index}.norm.weight', f'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight') )
rename_keys.append((f'sem_seg_head.layer_{source_index}.norm.bias', f'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias') )
rename_keys.append(('''sem_seg_head.mask_features.weight''', '''model.pixel_level_module.decoder.mask_projection.weight''') )
rename_keys.append(('''sem_seg_head.mask_features.bias''', '''model.pixel_level_module.decoder.mask_projection.bias''') )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight', f'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight') )
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias', f'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias') )
# cross-attention out projection
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight', f'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight') )
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias', f'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias') )
# MLP 1
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight', f'model.transformer_module.decoder.layers.{idx}.fc1.weight') )
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias', f'model.transformer_module.decoder.layers.{idx}.fc1.bias') )
# MLP 2
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight', f'model.transformer_module.decoder.layers.{idx}.fc2.weight') )
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias', f'model.transformer_module.decoder.layers.{idx}.fc2.bias') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight', f'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight') )
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias', f'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight', f'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight') )
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias', f'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias') )
# layernorm 3 (final layernorm)
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight', f'model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight') )
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias', f'model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias') )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.weight''', '''model.transformer_module.decoder.layernorm.weight''') )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.bias''', '''model.transformer_module.decoder.layernorm.bias''') )
# heads on top
rename_keys.append(('''sem_seg_head.predictor.query_embed.weight''', '''model.transformer_module.queries_embedder.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.weight''', '''model.transformer_module.input_projection.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.bias''', '''model.transformer_module.input_projection.bias''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.weight''', '''class_predictor.weight''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.bias''', '''class_predictor.bias''') )
for i in range(3 ):
rename_keys.append((f'sem_seg_head.predictor.mask_embed.layers.{i}.weight', f'mask_embedder.{i}.0.weight') )
rename_keys.append((f'sem_seg_head.predictor.mask_embed.layers.{i}.bias', f'mask_embedder.{i}.0.bias') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
    # fmt: on
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(model_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False):
    config = get_maskformer_config(model_name)

    # load original state_dict
    with open(checkpoint_path, "rb") as f:
        data = pickle.load(f)
    state_dict = data["model"]

    # for name, param in state_dict.items():
    #     print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)

    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()

    for name, param in model.named_parameters():
        print(name, param.shape)

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys) == 0, f"Unexpected keys: {unexpected_keys}"

    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65535
    else:
        ignore_index = 255
    reduce_labels = True if "ade" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels)

    inputs = image_processor(image, return_tensors="pt")

    outputs = model(**inputs)

    print("Logits:", outputs.class_queries_logits[0, :3, :3])

    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]] )
    assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        image_processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''maskformer-swin-tiny-ade''',
type=str,
        help="Name of the MaskFormer model you'd like to convert",
)
parser.add_argument(
'''--checkpoint_path''',
default='''/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl''',
type=str,
help='''Path to the original state dict (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
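
# Illustrative invocation (script name and checkpoint path are placeholders):
#   python convert_maskformer_checkpoint.py --model_name maskformer-swin-tiny-ade \
#       --checkpoint_path ./MaskFormer-Swin-tiny-ADE20k/model.pkl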
def jaro_winkler(str1: str, str2: str) -> float:
    """Compute the Jaro-Winkler similarity of two strings."""

    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"

        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('''hello''', '''world'''))
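    # Sanity check implied by the formula: identical strings score exactly 1.0.
    assert jaro_winkler("hello", "hello") == 1.0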
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"snap-research/efficientformer-l1-300": (
"https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
),
}
class EfficientFormerConfig(PretrainedConfig):
    model_type = "efficientformer"

    def __init__(self, depths: List[int] = [3, 2, 6, 4], hidden_sizes: List[int] = [48, 96, 224, 448], downsamples: List[bool] = [True, True, True, True], dim: int = 448, key_dim: int = 32, attention_ratio: int = 4, resolution: int = 7, num_hidden_layers: int = 5, num_attention_heads: int = 8, mlp_expansion_ratio: int = 4, hidden_dropout_prob: float = 0.0, patch_size: int = 16, num_channels: int = 3, pool_size: int = 3, downsample_patch_size: int = 3, downsample_stride: int = 2, downsample_pad: int = 1, drop_path_rate: float = 0.0, num_meta3d_blocks: int = 1, distillation: bool = True, use_layer_scale: bool = True, layer_scale_init_value: float = 1e-5, hidden_act: str = "gelu", initializer_range: float = 0.02, layer_norm_eps: float = 1e-12, image_size: int = 224, batch_norm_eps: float = 1e-05, **kwargs,) -> None:
        super().__init__(**kwargs)

        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
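

# Minimal usage sketch (the defaults above appear to mirror the EfficientFormer-L1 architecture):
#   config = EfficientFormerConfig()
#   assert config.model_type == "efficientformer"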
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import Seq2SeqTrainer
from seq2seq_training_args import Seq2SeqTrainingArguments

import transformers
from transformers import (
    AutoConfig,
    AutoModelForSeq2SeqLM,
    AutoTokenizer,
    HfArgumentParser,
    MBartTokenizer,
    MBartTokenizerFast,
    set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
    Seq2SeqDataCollator,
    Seq2SeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether to freeze the encoder."} )
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."} )
@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} )
    task: Optional[str] = field(
        default="summarization", metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"}, )
    max_source_length: Optional[int] = field(
        default=1024, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    max_target_length: Optional[int] = field(
        default=128, metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    val_max_target_length: Optional[int] = field(
        default=142, metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        }, )
    test_max_target_length: Optional[int] = field(
        default=142, metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."} )
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."} )
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."} )
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."} )
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."} )
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."} )
    ignore_pad_token_for_loss: bool = field(
        default=True, metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."}, )
def handle_metrics(split, metrics, output_dir):
    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f"  {key} = {metrics[key]}")
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))

    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    check_output_dir(training_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED), training_args.fp16, )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )

    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
    for p in extra_model_params:
        if getattr(training_args, p, None):
            assert hasattr(config, p), f"({config.__class__.__name__}) doesn't have a `{p}` attribute"
            setattr(config, p, getattr(training_args, p))

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_args.model_name_or_path, from_tf=".ckpt" in model_args.model_name_or_path, config=config, cache_dir=model_args.cache_dir, )

    # use task specific params
    use_task_specific_params(model, data_args.task)

    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams

    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang)

    if model_args.freeze_embeds:
        freeze_embeds(model)
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder())
        assert_all_frozen(model.get_encoder())

    dataset_class = Seq2SeqDataset
    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer, type_path="train", data_dir=data_args.data_dir, n_obs=data_args.n_train, max_target_length=data_args.max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or "", )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer, type_path="val", data_dir=data_args.data_dir, n_obs=data_args.n_val, max_target_length=data_args.val_max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or "", )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer, type_path="test", data_dir=data_args.data_dir, n_obs=data_args.n_test, max_target_length=data_args.test_max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or "", )
        if training_args.do_predict
        else None
    )

    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None
    )
    trainer = Seq2SeqTrainer(
        model=model, args=training_args, data_args=data_args, train_dataset=train_dataset, eval_dataset=eval_dataset, data_collator=Seq2SeqDataCollator(
            tokenizer, data_args, model.config.decoder_start_token_id, training_args.tpu_num_cores ), compute_metrics=compute_metrics_fn, tokenizer=tokenizer, )
    all_metrics = {}

    # Training
    if training_args.do_train:
        logger.info("*** Train ***")
        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None )
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train
        trainer.save_model()  # this also saves the tokenizer
        if trainer.is_world_process_zero():
            handle_metrics("train", metrics, training_args.output_dir)
            all_metrics.update(metrics)
            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))
            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(metric_key_prefix="val")
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics["val_loss"], 4)
        if trainer.is_world_process_zero():
            handle_metrics("val", metrics, training_args.output_dir)
            all_metrics.update(metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")
        test_output = trainer.predict(test_dataset=test_dataset, metric_key_prefix="test")
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test
        if trainer.is_world_process_zero():
            metrics["test_loss"] = round(metrics["test_loss"], 4)
            handle_metrics("test", metrics, training_args.output_dir)
            all_metrics.update(metrics)
            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True )
                test_preds = lmap(str.strip, test_preds)
                write_txt_file(test_preds, os.path.join(training_args.output_dir, "test_generations.txt"))

    if trainer.is_world_process_zero():
        save_json(all_metrics, os.path.join(training_args.output_dir, "all_results.json"))

    return all_metrics


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
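
# Illustrative invocation (script name, model and paths are placeholders):
#   python finetune_trainer.py --model_name_or_path t5-small --data_dir ./wmt_en_ro \
#       --output_dir ./output --do_train --do_eval --predict_with_generate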
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.array:
    """
    Helper function to read an audio file through ffmpeg.
    """
    ar = f"{sampling_rate}"
    ac = "1"
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]

    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio
def __UpperCamelCase ( _A : int , _A : float , _A : str = "f32le" , ) ->Optional[int]:
"""simple docstring"""
lowerCamelCase_ =f'{sampling_rate}'
lowerCamelCase_ ="""1"""
if format_for_conversion == "s16le":
lowerCamelCase_ =2
elif format_for_conversion == "f32le":
lowerCamelCase_ =4
else:
raise ValueError(f'Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`' )
lowerCamelCase_ =platform.system()
if system == "Linux":
lowerCamelCase_ ="""alsa"""
lowerCamelCase_ ="""default"""
elif system == "Darwin":
lowerCamelCase_ ="""avfoundation"""
lowerCamelCase_ =""":0"""
elif system == "Windows":
lowerCamelCase_ ="""dshow"""
lowerCamelCase_ ="""default"""
lowerCamelCase_ =[
"""ffmpeg""",
"""-f""",
format_,
"""-i""",
input_,
"""-ac""",
ac,
"""-ar""",
ar,
"""-f""",
format_for_conversion,
"""-fflags""",
"""nobuffer""",
"""-hide_banner""",
"""-loglevel""",
"""quiet""",
"""pipe:1""",
]
lowerCamelCase_ =int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
lowerCamelCase_ =_ffmpeg_stream(_A , _A )
for item in iterator:
yield item
def ffmpeg_microphone_live(sampling_rate: int, chunk_length_s: float, stream_chunk_s: Optional[int] = None, stride_length_s: Optional[Union[Tuple[float, float], float]] = None, format_for_conversion: str = "f32le",):
    """
    Helper around ffmpeg_microphone that also yields overlapping (strided) chunks.
    """
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s

    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]

    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    """
    Read raw bytes from ``iterator`` and yield chunks of ``chunk_len`` bytes, with ``stride``
    overlap on each side; with ``stream`` set, partial chunks are also yielded.
    """
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}" )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item
def _ffmpeg_stream(ffmpeg_command, buflen: int):
    """
    Internal function to create the generator of data through ffmpeg.
    """
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method(coefficient_matrix: NDArray[float64], constant_matrix: NDArray[float64], init_val: list[int], iterations: int,) -> list[float]:
    """
    Solve Ax = b with the Jacobi iteration method, running a fixed number of iterations.
    """
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1 )
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]
def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    """
    Check whether the coefficient part of the augmented matrix is strictly diagonally dominant.
    """
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]

        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
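
    # Illustrative 2x2 system (strictly diagonally dominant, so Jacobi converges):
    # 4x + y = 2 and x + 3y = -1 have the exact solution x = 7/11, y = -6/11.
    coefficient = np.array([[4.0, 1.0], [1.0, 3.0]])
    constant = np.array([[2.0], [-1.0]])
    print(jacobi_iteration_method(coefficient, constant, [0, 0], 25))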
import math
def solution(n: int = 100) -> int:
    """
    Return the difference between the square of the sum and the sum of the
    squares of the first ``n`` natural numbers.
    """
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(F'{solution() = }')
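    # Sanity check: for n = 10, (1 + ... + 10)^2 - (1^2 + ... + 10^2) = 3025 - 385 = 2640.
    assert solution(10) == 2640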
def bfs(graph, s, t, parent):
    # Return True if there is a node that has not been iterated.
    visited = [False] * len(graph)
    queue = []
    queue.append(s)
    visited[s] = True

    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u

    return visited[t]
def ford_fulkerson(graph, source, sink):
    # This array is filled by BFS and used to store the augmenting path
    parent = [-1] * (len(graph))
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink

        while s != source:
            # Find the minimum residual capacity along the selected path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow
        v = sink

        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]

    return max_flow
graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
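# For this classic example network (as used in CLRS), the printed maximum flow should be 23.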
import tensorflow as tf
from ...tf_utils import shape_list
class TFAdaptiveSoftmaxMask(tf.keras.layers.Layer):
    """Adaptive softmax output layer (Transformer-XL style)."""

    def __init__(self, vocab_size, d_embed, d_proj, cutoffs, div_val=1, keep_order=False, **kwargs):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [vocab_size]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        self.keep_order = keep_order

        self.out_layers = []
        self.out_projs = []
    def build(self, input_shape):
        if self.n_clusters > 0:
            self.cluster_weight = self.add_weight(
                shape=(self.n_clusters, self.d_embed), initializer="zeros", trainable=True, name="cluster_weight" )
            self.cluster_bias = self.add_weight(
                shape=(self.n_clusters,), initializer="zeros", trainable=True, name="cluster_bias" )

        if self.div_val == 1:
            for i in range(len(self.cutoffs)):
                if self.d_proj != self.d_embed:
                    proj = self.add_weight(
                        shape=(self.d_embed, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}", )
                    self.out_projs.append(proj)
                else:
                    self.out_projs.append(None)
                weight = self.add_weight(
                    shape=(self.vocab_size, self.d_embed), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._weight", )
                bias = self.add_weight(
                    shape=(self.vocab_size,), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._bias", )
                self.out_layers.append((weight, bias))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = self.d_embed // (self.div_val**i)
                proj = self.add_weight(
                    shape=(d_emb_i, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}" )
                self.out_projs.append(proj)
                weight = self.add_weight(
                    shape=(r_idx - l_idx, d_emb_i), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._weight", )
                bias = self.add_weight(
                    shape=(r_idx - l_idx,), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._bias", )
                self.out_layers.append((weight, bias))
        super().build(input_shape)
    @staticmethod
    def _logit(x, W, b, proj=None):
        y = x
        if proj is not None:
            y = tf.einsum("ibd,ed->ibe", y, proj)
        return tf.einsum("ibd,nd->ibn", y, W) + b

    @staticmethod
    def _gather_logprob(logprob, target):
        lp_size = shape_list(logprob)
        r = tf.range(lp_size[0], dtype=target.dtype)
        idx = tf.stack([r, target], 1)
        return tf.gather_nd(logprob, idx)
    def call(self, hidden, target, return_mean=True, training=False):
        head_logprob = 0
        if self.n_clusters == 0:
            output = self._logit(hidden, self.out_layers[0][0], self.out_layers[0][1], self.out_projs[0])
            if target is not None:
                loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output)
            out = tf.nn.log_softmax(output, axis=-1)
        else:
            hidden_sizes = shape_list(hidden)
            out = []
            loss = tf.zeros(hidden_sizes[:2])
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                if target is not None:
                    mask = (target >= l_idx) & (target < r_idx)
                    mask_idx = tf.where(mask)
                    cur_target = tf.boolean_mask(target, mask) - l_idx

                if self.div_val == 1:
                    cur_W = self.out_layers[0][0][l_idx:r_idx]
                    cur_b = self.out_layers[0][1][l_idx:r_idx]
                else:
                    cur_W = self.out_layers[i][0]
                    cur_b = self.out_layers[i][1]

                if i == 0:
                    cur_W = tf.concat([cur_W, self.cluster_weight], 0)
                    cur_b = tf.concat([cur_b, self.cluster_bias], 0)

                    head_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[0])
                    head_logprob = tf.nn.log_softmax(head_logit)
                    out.append(head_logprob[..., : self.cutoffs[0]])
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_head_logprob, cur_target)
                else:
                    tail_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[i])
                    tail_logprob = tf.nn.log_softmax(tail_logit)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    logprob_i = head_logprob[..., cluster_prob_idx, None] + tail_logprob
                    out.append(logprob_i)
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_tail_logprob = tf.boolean_mask(tail_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_tail_logprob, cur_target)
                        cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
                if target is not None:
                    loss += tf.scatter_nd(mask_idx, -cur_logprob, shape_list(loss))
            out = tf.concat(out, axis=-1)
        if target is not None:
            if return_mean:
                loss = tf.reduce_mean(loss)
            # Add the training-time loss value to the layer using `self.add_loss()`.
            self.add_loss(loss)
            # Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference).
            self.add_metric(loss, name=self.name, aggregation="mean" if return_mean else "")
        return out
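

# Hypothetical instantiation sketch (sizes resemble the usual Transformer-XL WT103
# configuration and are illustrative only):
#   softmax = TFAdaptiveSoftmaxMask(
#       vocab_size=267735, d_embed=1024, d_proj=1024, cutoffs=[20000, 40000, 200000], div_val=4
#   )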
from __future__ import annotations
def shear_stress(stress: float, tangential_force: float, area: float,) -> tuple[str, float]:
"""simple docstring"""
if (stress, tangential_force, area).count(0) != 1:
raise ValueError("""You cannot supply more or less than 2 values""")
elif stress < 0:
raise ValueError("""Stress cannot be negative""")
elif tangential_force < 0:
raise ValueError("""Tangential Force cannot be negative""")
elif area < 0:
raise ValueError("""Area cannot be negative""")
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
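
    # Example: with stress left as the unknown (passed as 0), a tangential force
    # of 25 N over an area of 5 m^2 should solve to ("stress", 5.0).
    print(shear_stress(stress=0, tangential_force=25, area=5))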
from __future__ import annotations
import time
import numpy as np
claim_vector = [8, 5, 9, 7]
allocated_resources_table = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
maximum_claim_table = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]
class BankersAlgorithm:
    """Banker's algorithm resource-allocation safety check."""

    def __init__(self, claim_vector, allocated_resources_table, maximum_claim_table,) -> None:
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table
    def __processes_resource_summation(self) -> list[int]:
        """Per-resource sum of currently allocated resources."""
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
    def __available_resources(self) -> list[int]:
        """Claim vector minus the sum of currently allocated resources."""
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
    def __need(self) -> list[list[int]]:
        """Per-process remaining need: maximum claim minus current allocation."""
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]
    def __need_index_manager(self) -> dict[int, list[int]]:
        """Map each need vector back to its original process index."""
        return {self.__need().index(i): i for i in self.__need()}
    def main(self, **kwargs) -> None:
        """Run the safety check, executing processes whose needs can be met."""
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
print('_' * 50 + '\n' )
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number] )
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources]) )
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break
    def __pretty_data(self):
        """Properly align and print the allocation and claim tables."""
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n" )
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n" )
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x) for x in self.__claim_vector) )
        print(
            "Initial Available Resources: "
            + " ".join(str(x) for x in self.__available_resources()) )
        time.sleep(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
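
    # Illustrative run using the module-level tables defined above (verbose output):
    BankersAlgorithm(claim_vector, allocated_resources_table, maximum_claim_table).main(describe=True)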
"""simple docstring"""
def pancake_sort(arr: list) -> list:
    """Sort a list using the pancake sort algorithm (prefix reversals)."""
    cur = len(arr)
    while cur > 1:
        # Find the index of the maximum number in arr
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse whole list
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr
if __name__ == "__main__":
UpperCAmelCase = input("""Enter numbers separated by a comma:\n""").strip()
UpperCAmelCase = [int(item) for item in user_input.split(""",""")]
print(pancake_sort(unsorted))
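    # Deterministic sanity check:
    assert pancake_sort([3, 1, 2]) == [1, 2, 3]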
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob(text: str) -> None:
    """Print the first- and second-order Shannon entropies of ``text``."""
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())

    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.

    # print entropy
    print(f"{round(-1 * my_fir_sum):.1f}")

    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for ch0 in my_alphas:
        for ch1 in my_alphas:
            sequence = ch0 + ch1
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)

    # print second entropy
    print(f"{round(-1 * my_sec_sum):.1f}")

    # print the difference between them
    print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}")
def analyze_text(text: str) -> tuple[Counter, Counter]:
    """
    Convert the text into two Counters: frequencies of single characters and of
    two-character sequences.
    """
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1

    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings
def main():
    import doctest

    doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roc_bert"] = [
'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoCBertForCausalLM',
'RoCBertForMaskedLM',
'RoCBertForMultipleChoice',
'RoCBertForPreTraining',
'RoCBertForQuestionAnswering',
'RoCBertForSequenceClassification',
'RoCBertForTokenClassification',
'RoCBertLayer',
'RoCBertModel',
'RoCBertPreTrainedModel',
'load_tf_weights_in_roc_bert',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
    else:
        pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
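# Minimal sketch of the lazy-import mechanism that _LazyModule provides
# (illustrative only; the real implementation lives in transformers.utils):
# attribute access imports the owning submodule on demand.
import importlib
def _lazy_getattr_sketch(name: str):
    for module_name, exported_names in _import_structure.items():
        if name in exported_names:
            submodule = importlib.import_module("." + module_name, __name__)
            return getattr(submodule, name)
    raise AttributeError(name)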
| 145
| 0
|
def bubble_sort(list_data, length=0):
    """simple docstring"""
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else bubble_sort(list_data, length - 1)
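# Illustrative check (hypothetical values): each recursive call either performs
# no swap (already sorted) or shrinks `length` by one, so the recursion terminates.
def _bubble_sort_demo() -> None:
    assert bubble_sort([4, 2, 3, 1]) == [1, 2, 3, 4]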
if __name__ == "__main__":
import doctest
doctest.testmod()
| 131
|
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SwinvaModelTester:
    def __init__( self , parent , batch_size=13 , image_size=32 , patch_size=2 , num_channels=3 , embed_dim=16 , depths=[1, 2, 1] , num_heads=[2, 2, 4] , window_size=2 , mlp_ratio=2.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , patch_norm=True , initializer_range=0.02 , layer_norm_eps=1E-5 , is_training=True , scope=None , use_labels=True , type_sequence_label_size=10 , encoder_stride=8 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
def UpperCAmelCase__( self : str )-> Optional[int]:
lowerCAmelCase__ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase__ : Dict = None
if self.use_labels:
lowerCAmelCase__ : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ : Optional[Any] = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase__( self : Optional[int] )-> str:
return SwinvaConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def UpperCAmelCase__( self : Optional[Any] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Any )-> int:
lowerCAmelCase__ : Union[str, Any] = SwinvaModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
lowerCAmelCase__ : List[str] = model(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : int = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowerCAmelCase__ : Dict = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
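        # e.g. with the tester defaults above (image_size=32, patch_size=2,
        # depths=[1, 2, 1], embed_dim=16): expected_seq_len = (32 // 2) ** 2 // 4 ** 2 = 16
        # and expected_dim = 16 * 2 ** 2 = 64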
def UpperCAmelCase__( self : Any , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Any )-> List[Any]:
lowerCAmelCase__ : Optional[int] = SwinvaForMaskedImageModeling(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
lowerCAmelCase__ : Optional[Any] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowerCAmelCase__ : Any = 1
lowerCAmelCase__ : Dict = SwinvaForMaskedImageModeling(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
lowerCAmelCase__ : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCAmelCase__ : Optional[int] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def UpperCAmelCase__( self : int , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : List[Any] )-> Union[str, Any]:
lowerCAmelCase__ : Tuple = self.type_sequence_label_size
lowerCAmelCase__ : Optional[Any] = SwinvaForImageClassification(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
lowerCAmelCase__ : Any = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase__( self : Tuple )-> str:
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class SwinvaModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase):
_a : str = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
_a : Tuple = (
{'''feature-extraction''': SwinvaModel, '''image-classification''': SwinvaForImageClassification}
if is_torch_available()
else {}
)
_a : List[str] = False
_a : int = False
_a : Optional[int] = False
_a : Optional[Any] = False
def UpperCAmelCase__( self : str )-> Optional[Any]:
        self.model_tester = SwinvaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=SwinvaConfig , embed_dim=37 )
def UpperCAmelCase__( self : str )-> int:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase__( self : Optional[int] )-> Optional[Any]:
lowerCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
@unittest.skip(reason='''Got `CUDA error: misaligned address` with PyTorch 2.0.0.''' )
def UpperCAmelCase__( self : Optional[Any] )-> Dict:
pass
@unittest.skip(reason='''Swinv2 does not use inputs_embeds''' )
def UpperCAmelCase__( self : Tuple )-> Optional[int]:
pass
def UpperCAmelCase__( self : List[Any] )-> List[str]:
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ : List[Any] = model_class(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCAmelCase__ : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) )
def UpperCAmelCase__( self : Any )-> Dict:
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ : Dict = model_class(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase__ : Tuple = [*signature.parameters.keys()]
lowerCAmelCase__ : int = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE )
def UpperCAmelCase__( self : Union[str, Any] )-> Dict:
lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : Tuple = True
for model_class in self.all_model_classes:
lowerCAmelCase__ : List[str] = True
lowerCAmelCase__ : Union[str, Any] = False
lowerCAmelCase__ : Optional[Any] = True
lowerCAmelCase__ : List[Any] = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
lowerCAmelCase__ : str = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
lowerCAmelCase__ : List[str] = outputs.attentions
lowerCAmelCase__ : Union[str, Any] = len(self.model_tester.depths )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCAmelCase__ : int = True
lowerCAmelCase__ : Dict = config.window_size**2
lowerCAmelCase__ : List[Any] = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
lowerCAmelCase__ : Optional[Any] = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
lowerCAmelCase__ : str = outputs.attentions
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
lowerCAmelCase__ : int = len(_SCREAMING_SNAKE_CASE )
# Check attention is always last and order is fine
lowerCAmelCase__ : str = True
lowerCAmelCase__ : List[str] = True
lowerCAmelCase__ : Union[str, Any] = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
lowerCAmelCase__ : int = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
if hasattr(self.model_tester , '''num_hidden_states_types''' ):
lowerCAmelCase__ : List[Any] = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
lowerCAmelCase__ : str = 2
self.assertEqual(out_len + added_hidden_states , len(_SCREAMING_SNAKE_CASE ) )
lowerCAmelCase__ : List[Any] = outputs.attentions
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
def UpperCAmelCase__( self : Dict , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : List[str] )-> Tuple:
lowerCAmelCase__ : Any = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
lowerCAmelCase__ : Any = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
lowerCAmelCase__ : str = outputs.hidden_states
lowerCAmelCase__ : Optional[int] = getattr(
self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
# Swinv2 has a different seq_length
lowerCAmelCase__ : List[Any] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCAmelCase__ : List[str] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
lowerCAmelCase__ : Dict = outputs.reshaped_hidden_states
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = reshaped_hidden_states[0].shape
lowerCAmelCase__ : Tuple = (
reshaped_hidden_states[0].view(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def UpperCAmelCase__( self : Tuple )-> List[Any]:
lowerCAmelCase__ , lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : List[str] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
lowerCAmelCase__ : Any = True
self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase__ : Any = True
self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def UpperCAmelCase__( self : Any )-> Tuple:
lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : Optional[int] = 3
lowerCAmelCase__ : List[str] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowerCAmelCase__ : List[str] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCAmelCase__ : List[Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowerCAmelCase__ : Tuple = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
lowerCAmelCase__ : Optional[Any] = True
self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase__ : Tuple = True
self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , (padded_height, padded_width) )
def UpperCAmelCase__( self : Dict )-> Optional[Any]:
lowerCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__( self : str )-> Optional[Any]:
lowerCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_SCREAMING_SNAKE_CASE )
@slow
def UpperCAmelCase__( self : Optional[Any] )-> int:
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ : Optional[Any] = SwinvaModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__( self : Dict )-> List[str]:
lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : Dict = _config_zero_init(_SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
lowerCAmelCase__ : List[str] = model_class(config=_SCREAMING_SNAKE_CASE )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@require_vision
@require_torch
class SwinvaModelIntegrationTest( unittest.TestCase):
@cached_property
def UpperCAmelCase__( self : Tuple )-> Optional[Any]:
return (
AutoImageProcessor.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase__( self : List[Any] )-> List[str]:
lowerCAmelCase__ : Any = SwinvaForImageClassification.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' ).to(
_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Optional[Any] = self.default_image_processor
lowerCAmelCase__ : Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCAmelCase__ : List[str] = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(_SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
lowerCAmelCase__ : Optional[int] = model(**_SCREAMING_SNAKE_CASE )
# verify the logits
lowerCAmelCase__ : Any = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : List[Any] = torch.tensor([-0.3947, -0.4306, 0.0026] ).to(_SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) )
| 131
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_nllb_moe''': [
'''NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''NllbMoeConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nllb_moe"] = [
'''NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''NllbMoeForConditionalGeneration''',
'''NllbMoeModel''',
'''NllbMoePreTrainedModel''',
'''NllbMoeTop2Router''',
'''NllbMoeSparseMLP''',
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
        NllbMoeTop2Router,
)
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 354
|
from math import isqrt
def calculate_prime_numbers(max_number: int) -> list[int]:
    """simple docstring"""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]
def solution(max_number: int = 10**8) -> int:
    """simple docstring"""
    prime_numbers = calculate_prime_numbers(max_number // 2)
    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1
    return semiprimes_count
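def _solution_demo() -> None:
    # Hypothetical small bound (not from the original module): the primes below
    # 15 are [2, 3, 5, 7, 11, 13], and the two-pointer sweep counts
    # 6 + 3 + 1 = 10 products p * q < 30 with p <= q.
    assert solution(30) == 10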
if __name__ == "__main__":
print(F"""{solution() = }""")
| 204
| 0
|
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def try_infer_format_from_ext(path: str):
    if not path:
        return "pipe"
    for ext in PipelineDataFormat.SUPPORTED_FORMATS:
        if path.endswith(ext):
            return ext
    raise Exception(
        f'Unable to determine file format from file extension {path}. '
        f'Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}' )
def run_command_factory(args):
    nlp = pipeline(
        task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
    format = try_infer_format_from_ext(args.input ) if args.format == '''infer''' else args.format
    reader = PipelineDataFormat.from_str(
        format=format , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , )
    return RunCommand(nlp , reader )
class RunCommand( BaseTransformersCLICommand ):
    def __init__( self , nlp : Pipeline , reader : PipelineDataFormat):
        '''simple docstring'''
        self._nlp = nlp
        self._reader = reader
    @staticmethod
    def register_subcommand( parser : ArgumentParser):
        '''simple docstring'''
        run_parser = parser.add_parser('''run''' , help='''Run a pipeline through the CLI''')
        run_parser.add_argument('''--task''' , choices=get_supported_tasks() , help='''Task to run''')
        run_parser.add_argument('''--input''' , type=str , help='''Path to the file to use for inference''')
        run_parser.add_argument('''--output''' , type=str , help='''Path to the file that will be used post to write results.''')
        run_parser.add_argument('''--model''' , type=str , help='''Name or path to the model to instantiate.''')
        run_parser.add_argument('''--config''' , type=str , help='''Name or path to the model\'s config to instantiate.''')
        run_parser.add_argument(
            '''--tokenizer''' , type=str , help='''Name of the tokenizer to use. (default: same as the model name)''')
        run_parser.add_argument(
            '''--column''' , type=str , help='''Name of the column to use as input. (For multi columns input as QA use column1,columns2)''' , )
        run_parser.add_argument(
            '''--format''' , type=str , default='''infer''' , choices=PipelineDataFormat.SUPPORTED_FORMATS , help='''Input format to read from''' , )
        run_parser.add_argument(
            '''--device''' , type=int , default=-1 , help='''Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)''' , )
        run_parser.add_argument('''--overwrite''' , action='''store_true''' , help='''Allow overwriting the output file.''')
        run_parser.set_defaults(func=run_command_factory)
    def run( self ):
        '''simple docstring'''
        nlp, outputs = self._nlp, []
        for entry in self._reader:
            output = nlp(**entry) if self._reader.is_multi_columns else nlp(entry)
            if isinstance(output , dict):
                outputs.append(output)
            else:
                outputs += output
        # Saving data
        if self._nlp.binary_output:
            binary_path = self._reader.save_binary(outputs)
            logger.warning(F'Current pipeline requires output to be in binary format, saving at {binary_path}')
        else:
            self._reader.save(outputs)
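# Usage sketch (hypothetical paths and task name; assumes the transformers-cli
# entry point registers RunCommand under the "run" subcommand):
#   transformers-cli run --task text-classification --input data.csv \
#       --output out.json --format infer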
| 49
|
from collections import defaultdict
def check_anagrams(first_str: str, second_str: str) -> bool:
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()
    # Remove whitespace
    first_str = first_str.replace(''' ''' , '''''' )
    second_str = second_str.replace(''' ''' , '''''' )
    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False
    # Default values for count should be 0
    count = defaultdict(int)
    # For each character in input strings,
    # increment count in the corresponding
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1
    return all(_count == 0 for _count in count.values() )
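# Illustrative examples (hypothetical inputs): the per-character counts cancel
# to zero exactly when the two strings are anagrams.
def _check_anagrams_demo() -> None:
    assert check_anagrams("Dormitory", "Dirty room") is True
    assert check_anagrams("hello", "world") is False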
if __name__ == "__main__":
from doctest import testmod
testmod()
    input_a = input('''Enter the first string ''').strip()
    input_b = input('''Enter the second string ''').strip()
    status = check_anagrams(input_a, input_b)
print(f'{input_a} and {input_b} are {"" if status else "not "}anagrams.')
| 49
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
MRA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''uw-madison/mra-base-512-4''': '''https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json''',
}
class MraConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''mra'''
    def __init__( self , vocab_size=50265 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=1 , initializer_range=0.0_2 , layer_norm_eps=1e-5 , position_embedding_type="absolute" , block_per_row=4 , approx_mode="full" , initial_prior_first_n_blocks=0 , initial_prior_diagonal_n_blocks=0 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
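# Instantiation sketch (hypothetical values; assumes the argument names
# reconstructed in the __init__ above):
# config = MraConfig(vocab_size=50265, block_per_row=4, approx_mode="full")
# config.num_hidden_layers  # -> 12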
| 188
|
"""simple docstring"""
def solution(limit: int = 1_00_00_00) -> int:
    phi = [i - 1 for i in range(limit + 1)]
    for i in range(2 , limit + 1):
        if phi[i] == i - 1:
            for j in range(2 * i , limit + 1 , i):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1])
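def _solution_demo() -> None:
    # Hypothetical small limit (not from the original module):
    # phi(2..8) = 1, 2, 2, 4, 2, 6, 4, which sums to 21.
    assert solution(8) == 21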
if __name__ == "__main__":
print(solution())
| 188
| 1
|
'''simple docstring'''
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError("""At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training""")
# TF training parameters
USE_XLA = False
USE_AMP = False
def train_command_factory(args: Namespace):
    return TrainCommand(args)
class TrainCommand( BaseTransformersCLICommand ):
"""simple docstring"""
@staticmethod
    def register_subcommand( parser ):
        """simple docstring"""
        train_parser = parser.add_parser("train" , help="CLI tool to train a model on a task." )
        train_parser.add_argument(
            "--train_data" , type=str , required=True , help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences." , )
        train_parser.add_argument(
            "--column_label" , type=int , default=0 , help="Column of the dataset csv file with example labels." )
        train_parser.add_argument(
            "--column_text" , type=int , default=1 , help="Column of the dataset csv file with example texts." )
        train_parser.add_argument(
            "--column_id" , type=int , default=2 , help="Column of the dataset csv file with example ids." )
        train_parser.add_argument(
            "--skip_first_row" , action="store_true" , help="Skip the first row of the csv file (headers)." )
        train_parser.add_argument("--validation_data" , type=str , default="" , help="path to validation dataset." )
        train_parser.add_argument(
            "--validation_split" , type=float , default=0.1 , help="if validation dataset is not provided, fraction of train dataset to use as validation dataset." , )
        train_parser.add_argument("--output" , type=str , default="./" , help="path to saved the trained model." )
        train_parser.add_argument(
            "--task" , type=str , default="text_classification" , help="Task to train the model on." )
        train_parser.add_argument(
            "--model" , type=str , default="bert-base-uncased" , help="Model's name or path to stored model." )
        train_parser.add_argument("--train_batch_size" , type=int , default=32 , help="Batch size for training." )
        train_parser.add_argument("--valid_batch_size" , type=int , default=64 , help="Batch size for validation." )
        train_parser.add_argument("--learning_rate" , type=float , default=3e-5 , help="Learning rate." )
        train_parser.add_argument("--adam_epsilon" , type=float , default=1e-08 , help="Epsilon for Adam optimizer." )
        train_parser.set_defaults(func=train_command_factory )
    def __init__( self , args ):
        """simple docstring"""
        self.logger = logging.get_logger("transformers-cli/training" )
        self.framework = "tf" if is_tf_available() else "torch"
        os.makedirs(args.output , exist_ok=True )
        self.output = args.output
        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id
        self.logger.info(f'''Loading {args.task} pipeline for {args.model}''' )
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model )
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError
        self.logger.info(f'''Loading dataset from {args.train_data}''' )
        self.train_dataset = Processor.create_from_csv(
            args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(f'''Loading validation dataset from {args.validation_data}''' )
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon
    def run( self ):
        """simple docstring"""
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()
    def run_torch( self ):
        """simple docstring"""
        raise NotImplementedError
    def run_tf( self ):
        """simple docstring"""
        self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
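# Usage sketch (hypothetical arguments; assumes the transformers-cli entry
# point registers TrainCommand under the "train" subcommand):
#   transformers-cli train --train_data train.csv --task text_classification \
#       --model bert-base-uncased --output ./out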
| 55
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester( unittest.TestCase ):
'''simple docstring'''
    def __init__( self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=4_00, do_resize=True, size=None, apply_ocr=True, ):
        '''simple docstring'''
        size = size if size is not None else {'''height''': 18, '''width''': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
'''simple docstring'''
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
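    # e.g. with the tester defaults above this returns
    # {"do_resize": True, "size": {"height": 18, "width": 18}, "apply_ocr": True}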
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
'''simple docstring'''
A_ : Optional[int] = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def _SCREAMING_SNAKE_CASE ( self : Dict ):
'''simple docstring'''
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self )
@property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def _SCREAMING_SNAKE_CASE ( self : int ):
'''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing, '''do_resize''' ) )
        self.assertTrue(hasattr(image_processing, '''size''' ) )
        self.assertTrue(hasattr(image_processing, '''apply_ocr''' ) )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
'''simple docstring'''
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size, {'''height''': 18, '''width''': 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42 )
        self.assertEqual(image_processor.size, {'''height''': 42, '''width''': 42} )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
'''simple docstring'''
# Initialize image_processing
__A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__A = prepare_image_inputs(self.image_processor_tester, equal_resolution=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase, Image.Image )
# Test not batched input
__A = image_processing(image_inputs[0], return_tensors='''pt''' )
self.assertEqual(
encoding.pixel_values.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
), )
self.assertIsInstance(encoding.words, _lowerCamelCase )
self.assertIsInstance(encoding.boxes, _lowerCamelCase )
# Test batched
__A = image_processing(_lowerCamelCase, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
), )
def _SCREAMING_SNAKE_CASE ( self : Any ):
'''simple docstring'''
# Initialize image_processing
__A = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__A = prepare_image_inputs(self.image_processor_tester, equal_resolution=_lowerCamelCase, numpify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase, np.ndarray )
# Test not batched input
__A = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
), )
# Test batched
__A = image_processing(_lowerCamelCase, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
), )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
'''simple docstring'''
# Initialize image_processing
__A = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__A = prepare_image_inputs(self.image_processor_tester, equal_resolution=_lowerCamelCase, torchify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase, torch.Tensor )
# Test not batched input
__A = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
), )
# Test batched
__A = image_processing(_lowerCamelCase, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
), )
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
'''simple docstring'''
# with apply_OCR = True
__A = LayoutLMvaImageProcessor()
from datasets import load_dataset
__A = load_dataset('''hf-internal-testing/fixtures_docvqa''', split='''test''' )
__A = Image.open(ds[0]['''file'''] ).convert('''RGB''' )
__A = image_processing(_lowerCamelCase, return_tensors='''pt''' )
self.assertEqual(encoding.pixel_values.shape, (1, 3, 2_24, 2_24) )
self.assertEqual(len(encoding.words ), len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
__A = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231
__A = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 6_02, 6_76, 
6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words, _lowerCamelCase )
self.assertListEqual(encoding.boxes, _lowerCamelCase )
# with apply_OCR = False
__A = LayoutLMvaImageProcessor(apply_ocr=_lowerCamelCase )
__A = image_processing(_lowerCamelCase, return_tensors='''pt''' )
self.assertEqual(encoding.pixel_values.shape, (1, 3, 2_24, 2_24) )
| 266
| 0
|
"""simple docstring"""
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = 'python tqdm regex requests packaging filelock numpy tokenizers'.split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append('dataclasses')
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append('importlib_metadata')
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg] , hint )
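# Usage sketch (assumes the dep_version_check signature reconstructed above):
# dep_version_check("tqdm")  # raises if the installed tqdm violates deps["tqdm"]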
| 366
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_xlm_roberta': [
'XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XLMRobertaConfig',
'XLMRobertaOnnxConfig',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlm_roberta"] = ['XLMRobertaTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlm_roberta_fast"] = ['XLMRobertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta"] = [
'XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMRobertaForCausalLM',
'XLMRobertaForMaskedLM',
'XLMRobertaForMultipleChoice',
'XLMRobertaForQuestionAnswering',
'XLMRobertaForSequenceClassification',
'XLMRobertaForTokenClassification',
'XLMRobertaModel',
'XLMRobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm_roberta"] = [
'TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLMRobertaForCausalLM',
'TFXLMRobertaForMaskedLM',
'TFXLMRobertaForMultipleChoice',
'TFXLMRobertaForQuestionAnswering',
'TFXLMRobertaForSequenceClassification',
'TFXLMRobertaForTokenClassification',
'TFXLMRobertaModel',
'TFXLMRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xlm_roberta"] = [
'FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxXLMRobertaForMaskedLM',
'FlaxXLMRobertaForCausalLM',
'FlaxXLMRobertaForMultipleChoice',
'FlaxXLMRobertaForQuestionAnswering',
'FlaxXLMRobertaForSequenceClassification',
'FlaxXLMRobertaForTokenClassification',
'FlaxXLMRobertaModel',
'FlaxXLMRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 86
| 0
|
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency(inductance: float, capacitance: float) -> tuple[str, float]:
    '''simple docstring'''
    if inductance <= 0:
        raise ValueError('Inductance cannot be 0 or negative' )
    elif capacitance <= 0:
        raise ValueError('Capacitance cannot be 0 or negative' )
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
        )
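def _resonant_frequency_demo() -> None:
    # Hypothetical component values (not from the original module):
    # L = 10 mH and C = 5 uF give f0 = 1 / (2 * pi * sqrt(L * C)) ~= 711.76 Hz.
    label, freq = resonant_frequency(10e-3, 5e-6)
    assert abs(freq - 711.76) < 0.01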
if __name__ == "__main__":
import doctest
doctest.testmod()
| 176
|
'''simple docstring'''
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 2048-bit
14: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AACAA68FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 3072-bit
15: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 4096-bit
16: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'
+ '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'
+ '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'
+ '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'
+ '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'
+ '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199'
+ 'FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 6144-bit
17: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08'
+ '8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B'
+ '302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9'
+ 'A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6'
+ '49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8'
+ 'FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C'
+ '180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718'
+ '3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D'
+ '04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D'
+ 'B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226'
+ '1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC'
+ 'E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26'
+ '99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB'
+ '04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2'
+ '233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127'
+ 'D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'
+ '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406'
+ 'AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918'
+ 'DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151'
+ '2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03'
+ 'F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F'
+ 'BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'
+ 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B'
+ 'B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632'
+ '387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E'
+ '6DCC4024FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 8192-bit
18: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'
+ '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'
+ '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'
+ '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'
+ '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'
+ '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'
+ '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD'
+ 'F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831'
+ '179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B'
+ 'DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF'
+ '5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6'
+ 'D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3'
+ '23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'
+ 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328'
+ '06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C'
+ 'DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE'
+ '12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4'
+ '38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300'
+ '741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568'
+ '3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9'
+ '22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B'
+ '4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A'
+ '062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36'
+ '4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1'
+ 'B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92'
+ '4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47'
+ '9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71'
+ '60C980DD98EDD3DFFFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
}
class DiffieHellman:
    """Class to represent the Diffie-Hellman key exchange protocol."""

    # Current minimum recommendation is 2048 bit (group 14)
    def __init__(self, group: int = 14) -> None:
        if group not in primes:
            raise ValueError("Unsupported Group")
        self.prime = primes[group]["prime"]
        self.generator = primes[group]["generator"]

        self.__private_key = int(hexlify(urandom(32)), base=16)

    def get_private_key(self) -> str:
        return hex(self.__private_key)[2:]

    def generate_public_key(self) -> str:
        public_key = pow(self.generator, self.__private_key, self.prime)
        return hex(public_key)[2:]

    def is_valid_public_key(self, key: int) -> bool:
        # Check that the other party's public key is valid (NIST SP800-56)
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime) == 1
        )

    def generate_shared_key(self, other_key_str: str) -> str:
        other_key = int(other_key_str, base=16)
        if not self.is_valid_public_key(other_key):
            raise ValueError("Invalid public key")
        shared_key = pow(other_key, self.__private_key, self.prime)
        # shaaaa is the SHA-256 constructor bound by the import at the top of this file
        return shaaaa(str(shared_key).encode()).hexdigest()

    @staticmethod
    def is_valid_public_key_static(remote_public_key_str: int, prime: int) -> bool:
        # Check that the other party's public key is valid (NIST SP800-56)
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(remote_public_key_str, (prime - 1) // 2, prime) == 1
        )

    @staticmethod
    def generate_shared_key_static(
        local_private_key_str: str, remote_public_key_str: str, group: int = 14
    ) -> str:
        local_private_key = int(local_private_key_str, base=16)
        remote_public_key = int(remote_public_key_str, base=16)
        prime = primes[group]["prime"]
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime):
            raise ValueError("Invalid public key")
        shared_key = pow(remote_public_key, local_private_key, prime)
        return shaaaa(str(shared_key).encode()).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
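# Hedged usage sketch (not part of the original module): two parties using the same
# group exchange public keys and must derive the same SHA-256 shared secret.
if __name__ == "__main__":
    alice = DiffieHellman(group=14)
    bob = DiffieHellman(group=14)
    assert alice.generate_shared_key(bob.generate_public_key()) == bob.generate_shared_key(
        alice.generate_public_key()
    )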
| 145
| 0
|
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """Returns a mapping from utf-8 bytes to printable unicode strings used by byte-level BPE."""
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (given as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
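# Quick check (a sketch): printable bytes map to themselves, while e.g. the space byte
# is remapped above U+0100 (space -> "Ġ"), which is why BPE merge files show "Ġ".
# byte_encoder = bytes_to_unicode()
# byte_encoder[ord("A")] == "A" and byte_encoder[ord(" ")] == "Ġ"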
class BlenderbotTokenizer(PreTrainedTokenizer):
    """Constructs a Blenderbot tokenizer, derived from the GPT-2 byte-level BPE tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
    @property
    # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        return token_ids_0 + [self.eos_token_id]
def __lowerCAmelCase ( self : Optional[Any] ,lowercase_ : "Conversation" ):
lowerCAmelCase__ : List[Any] = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(''' ''' + text )
else:
# Generated responses should contain them already.
inputs.append(lowercase_ )
lowerCAmelCase__ : Optional[Any] = ''' '''.join(lowercase_ )
lowerCAmelCase__ : List[Any] = self.encode(lowercase_ )
if len(lowercase_ ) > self.model_max_length:
lowerCAmelCase__ : List[Any] = input_ids[-self.model_max_length :]
logger.warning(F'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.' )
return input_ids
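# Hedged usage sketch (assumes Hugging Face Hub access; the checkpoint name comes from
# the PRETRAINED_VOCAB_FILES_MAP above). Note Blenderbot appends only an EOS token,
# per build_inputs_with_special_tokens.
# tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")
# ids = tokenizer(" Hello world")["input_ids"]   # ends with tokenizer.eos_token_id
# print(tokenizer.decode(ids))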
| 74
|
"""simple docstring"""
import qiskit
def quantum_entanglement(qubits: int = 2):
    """Build a GHZ state on `qubits` qubits and return the measurement counts."""
    classical_bits = qubits
    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend("aer_simulator")

    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)

    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0)
    for i in range(1, qubits):
        # Adding CX (CNOT) gate
        circuit.cx(i - 1, i)

    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits)), list(range(classical_bits)))

    # Now measuring any one qubit would affect other qubits to collapse
    # their super position and have same state as the measured one.

    # Executing the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)

    return job.result().get_counts(circuit)
if __name__ == "__main__":
print(F'''Total count for various states are: {quantum_entanglement(3)}''')
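# Hedged sanity check (assumes a qiskit release that still ships qiskit.Aer and
# qiskit.execute, i.e. pre-1.0 with qiskit-aer installed): a GHZ state should only
# ever collapse to all-zeros or all-ones, in roughly equal proportion.
# counts = quantum_entanglement(2)   # e.g. {'00': 493, '11': 507}
# assert set(counts) <= {"00", "11"}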
| 74
| 1
|
'''simple docstring'''
def hamming(n_element: int) -> list:
    """Return the first n_element Hamming numbers (numbers of the form 2^i * 3^j * 5^k)."""
    n_element = int(n_element)
    if n_element < 1:
        my_error = ValueError("n_element should be a positive number")
        raise my_error

    hamming_list = [1]
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5)
        )
        index += 1
    return hamming_list


if __name__ == "__main__":
    n = input("Enter the last number (nth term) of the Hamming Number Series: ")
    print("Formula of Hamming Number Series => 2^i * 3^j * 5^k")
    hamming_numbers = hamming(int(n))
    print("-----------------------------------------------------")
    print(f"The list with nth numbers is: {hamming_numbers}")
    print("-----------------------------------------------------")
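# Worked example (a sketch, bypassing the interactive prompt above): the first ten
# Hamming numbers are the 2^i * 3^j * 5^k values in increasing order.
# hamming(10) -> [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]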
| 22
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nezha"] = [
"NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST",
"NezhaForNextSentencePrediction",
"NezhaForMaskedLM",
"NezhaForPreTraining",
"NezhaForMultipleChoice",
"NezhaForQuestionAnswering",
"NezhaForSequenceClassification",
"NezhaForTokenClassification",
"NezhaModel",
"NezhaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
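# Hedged sketch of the lazy-import behaviour set up above (module path assumed to be
# transformers/models/nezha/__init__.py): importing the package is cheap, and the
# torch-backed classes are only materialized on first attribute access.
# from transformers.models import nezha       # no model code imported yet
# model_cls = nezha.NezhaForMaskedLM          # _LazyModule resolves the symbol here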
| 204
| 0
|
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
logger = logging.getLogger(__name__)
class BertEncoderWithPabee(BertEncoder):
    def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None):
        layer_outputs = self.layer[current_layer](hidden_states, attention_mask, head_mask[current_layer])

        hidden_states = layer_outputs[0]

        return hidden_states
@add_start_docstrings(
    "The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.",
    BERT_START_DOCSTRING,
)
class BertModelWithPabee(BertModel):
    def __init__(self, config):
        super().__init__(config)

        self.encoder = BertEncoderWithPabee(config)

        self.init_weights()
        self.patience = 0
        self.inference_instances_num = 0
        self.inference_layers_num = 0

        self.regression_threshold = 0

    def set_regression_threshold(self, threshold):
        self.regression_threshold = threshold

    def set_patience(self, patience):
        self.patience = patience

    def reset_stats(self):
        self.inference_instances_num = 0
        self.inference_layers_num = 0

    def log_stats(self):
        avg_inf_layers = self.inference_layers_num / self.inference_instances_num
        message = (
            f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
            f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
        )
        print(message)
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_dropout=None,
        output_layers=None,
        regression=False,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = embedding_output

        if self.training:
            res = []
            for i in range(self.config.num_hidden_layers):
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](output_dropout(pooled_output))
                res.append(logits)
        elif self.patience == 0:  # Use all layers for inference
            encoder_outputs = self.encoder(
                embedding_output,
                attention_mask=extended_attention_mask,
                head_mask=head_mask,
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_extended_attention_mask,
            )
            pooled_output = self.pooler(encoder_outputs[0])
            res = [output_layers[self.config.num_hidden_layers - 1](pooled_output)]
        else:
            patient_counter = 0
            patient_result = None
            calculated_layer_num = 0
            for i in range(self.config.num_hidden_layers):
                calculated_layer_num += 1
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](pooled_output)
                if regression:
                    labels = logits.detach()
                    if patient_result is not None:
                        patient_labels = patient_result.detach()
                    if (patient_result is not None) and torch.abs(patient_labels - labels) < self.regression_threshold:
                        patient_counter += 1
                    else:
                        patient_counter = 0
                else:
                    labels = logits.detach().argmax(dim=1)
                    if patient_result is not None:
                        patient_labels = patient_result.detach().argmax(dim=1)
                    if (patient_result is not None) and torch.all(labels.eq(patient_labels)):
                        patient_counter += 1
                    else:
                        patient_counter = 0

                patient_result = logits
                if patient_counter == self.patience:
                    break
            res = [patient_result]
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1

        return res
@add_start_docstrings(
    """Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """,
    BERT_START_DOCSTRING,
)
class BertForSequenceClassificationWithPabee(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.bert = BertModelWithPabee(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)]
        )

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        logits = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_dropout=self.dropout,
            output_layers=self.classifiers,
            regression=self.num_labels == 1,
        )

        outputs = (logits[-1],)

        if labels is not None:
            total_loss = None
            total_weights = 0
            for ix, logits_item in enumerate(logits):
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    loss = loss_fct(logits_item.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1))
                if total_loss is None:
                    total_loss = loss
                else:
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            outputs = (total_loss / total_weights,) + outputs

        return outputs
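# Hedged usage sketch (assumes a checkpoint whose per-layer classifiers were trained
# with PABEE; the path is a placeholder and `batch` stands for tokenized inputs):
# model = BertForSequenceClassificationWithPabee.from_pretrained("path/to/pabee-checkpoint")
# model.eval()
# model.bert.set_patience(3)        # exit once 3 consecutive layers agree
# model.bert.reset_stats()
# logits = model(input_ids=batch["input_ids"], attention_mask=batch["attention_mask"])[0]
# model.bert.log_stats()            # reports the average number of layers actually run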
| 352
|
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class DepthEstimationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING)

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        return {}, {}, {}

    def preprocess(self, image):
        image = load_image(image)
        self.image_size = image.size
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs):
        predicted_depth = model_outputs.predicted_depth
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode="bicubic", align_corners=False
        )
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output)).astype("uint8")
        depth = Image.fromarray(formatted)

        output_dict = {}
        output_dict["predicted_depth"] = predicted_depth
        output_dict["depth"] = depth
        return output_dict
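# Hedged usage sketch (assumes hub access; "Intel/dpt-large" is one public
# depth-estimation checkpoint, chosen here purely for illustration):
# from transformers import pipeline
# depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
# out = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
# out["depth"].save("depth.png")   # PIL image; out["predicted_depth"] is the raw tensor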
| 238
| 0
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class OwlViTProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_image_processor(self, **kwargs):
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a list of PIL images for the tests below."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = OwlViTProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = OwlViTProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = OwlViTProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = OwlViTProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, OwlViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, OwlViTImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        processor = OwlViTProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = OwlViTProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, OwlViTImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str, return_tensors="np")
        encoded_tok = tokenizer(input_str, return_tensors="np")

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key][0].tolist(), encoded_processor[key][0].tolist())
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_text = ["cat", "nasa badge"]
        inputs = processor(text=input_text)

        seq_length = 16
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_nested_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = [["cat", "nasa badge"], ["person"]]
        inputs = processor(text=input_texts)

        seq_length = 16
        batch_size = len(input_texts)
        num_max_text_queries = max([len(texts) for texts in input_texts])

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (batch_size * num_max_text_queries, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_processor_case(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = ["cat", "nasa badge"]
        inputs = processor(text=input_texts)

        seq_length = 16
        input_ids = inputs["input_ids"]
        predicted_ids = [
            [49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))
        self.assertListEqual(list(input_ids[0]), predicted_ids[0])
        self.assertListEqual(list(input_ids[1]), predicted_ids[1])

    def test_processor_case2(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        query_input = self.prepare_image_inputs()

        inputs = processor(images=image_input, query_images=query_input)

        self.assertListEqual(list(inputs.keys()), ["query_pixel_values", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
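# Hedged usage sketch mirroring test_processor_with_nested_text_list above (assumes
# hub access): nested text queries are padded up to the largest inner list, so
# input_ids has shape (batch_size * num_max_text_queries, 16).
# processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
# inputs = processor(text=[["cat", "nasa badge"], ["person"]], return_tensors="np")
# inputs["input_ids"].shape   # (4, 16)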
| 188
|
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotAudioClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        # No specific FOR_XXX available yet

    def __call__(self, audios: Union[np.ndarray, bytes, str], **kwargs):
        return super().__call__(audios, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        if isinstance(audio, str):
            if audio.startswith("http://") or audio.startswith("https://"):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, "rb") as f:
                    audio = f.read()

        if isinstance(audio, bytes):
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)

        if not isinstance(audio, np.ndarray):
            raise ValueError("We expect a numpy ndarray as input")
        if len(audio.shape) != 1:
            raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline")

        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt"
        )
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_audio,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]

        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError("`tf` framework not supported.")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
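# Hedged usage sketch (assumes a CLAP-style checkpoint; "laion/clap-htsat-unfused" is
# one public option, and the wav path is a placeholder):
# from transformers import pipeline
# classifier = pipeline("zero-shot-audio-classification", model="laion/clap-htsat-unfused")
# classifier("dog_bark.wav", candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"])
# -> [{"score": ..., "label": "Sound of a dog"}, {"score": ..., "label": "Sound of vacuum cleaner"}]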
| 188
| 1
|
'''simple docstring'''
def create_ngram(sentence: str, ngram_size: int) -> list[str]:
    """Create all n-grams (contiguous substrings of length ngram_size) of a sentence."""
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]
if __name__ == "__main__":
from doctest import testmod
testmod()
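# Worked example (a sketch): character n-grams of size 2.
if __name__ == "__main__":
    assert create_ngram("abcd", 2) == ["ab", "bc", "cd"]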
| 249
|
'''simple docstring'''
def combination_util(arr, n, r, index, data, i):
    """Recursively fill data[] and print every combination of size r from arr[0..n-1]."""
    # Current combination is ready to be printed
    if index == r:
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)


# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combination_util()
def print_combination(arr, n, r):
    # A temporary array to store all combination one by one
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)


if __name__ == "__main__":
    # Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
    print_combination(arr, len(arr), 3)
    # This code is contributed by Ambuj sahu
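# Equivalence note (a sketch): the recursion above enumerates the same r-subsets as
# itertools.combinations, only printed rather than returned.
# from itertools import combinations
# assert len(list(combinations([10, 20, 30, 40, 50], 3))) == 10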
| 249
| 1
|
"""simple docstring"""
def fibonacci(n: int) -> int:
    """Return the nth Fibonacci number under this 1-based indexing (F(1) = 0, F(2) = 1)."""
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    """Return the index of the first Fibonacci number with n digits."""
    digits = 0
    index = 2

    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))

    return index


def solution(n: int = 1000) -> int:
    """Project Euler 25: index of the first Fibonacci term with n digits."""
    return fibonacci_digits_index(n)


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
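# Quick check (a sketch): the first Fibonacci term with 3 digits is F(12) = 144 under
# this indexing, so fibonacci_digits_index(3) == 12; for the full Project Euler 25
# case, solution() is expected to return 4782.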
| 17
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sail/poolformer_s12": "https://huggingface.co/sail/poolformer_s12/resolve/main/config.json",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class PoolFormerConfig(PretrainedConfig):
    model_type = "poolformer"

    def __init__(
        self,
        num_channels=3,
        patch_size=16,
        stride=16,
        pool_size=3,
        mlp_ratio=4.0,
        depths=[2, 2, 6, 2],
        hidden_sizes=[64, 128, 320, 512],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        padding=[2, 1, 1, 1],
        num_encoder_blocks=4,
        drop_path_rate=0.0,
        hidden_act="gelu",
        use_layer_scale=True,
        layer_scale_init_value=1e-5,
        initializer_range=0.02,
        **kwargs,
    ):
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.stride = stride
        self.padding = padding
        self.pool_size = pool_size
        self.hidden_sizes = hidden_sizes
        self.mlp_ratio = mlp_ratio
        self.depths = depths
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.num_encoder_blocks = num_encoder_blocks
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.initializer_range = initializer_range
        super().__init__(**kwargs)


class PoolFormerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 2e-3
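# Hedged usage sketch: a default config plus the ONNX export surface declared above.
# config = PoolFormerConfig()
# onnx_config = PoolFormerOnnxConfig(config)
# dict(onnx_config.inputs)          # {"pixel_values": {0: "batch", 1: "num_channels", 2: "height", 3: "width"}}
# onnx_config.atol_for_validation   # 0.002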
| 86
| 0
|
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class FeatureExtractionSavingTestMixin:
    feature_extraction_class = None

    def test_feat_extract_to_json_string(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        obj = json.loads(feat_extract.to_json_string())
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key], value)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_init_without_params(self):
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract)
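# Hedged sketch of how the mixin is consumed (the subclass name, extractor class, and
# dict values below are illustrative, not from the original file):
# class Wav2Vec2FeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
#     feature_extraction_class = Wav2Vec2FeatureExtractor
#     feat_extract_dict = {"feature_size": 1, "sampling_rate": 16000, "padding_value": 0.0}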
| 119
|
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 300  # TEMPERATURE (unit = K)


def builtin_voltage(
    donor_conc: float,  # donor concentration
    acceptor_conc: float,  # acceptor concentration
    intrinsic_conc: float,  # intrinsic concentration
) -> float:
    """Built-in potential of a pn-junction: V_bi = (kT/q) * ln(N_D * N_A / n_i^2)."""
    if donor_conc <= 0:
        raise ValueError("Donor concentration should be positive")
    elif acceptor_conc <= 0:
        raise ValueError("Acceptor concentration should be positive")
    elif intrinsic_conc <= 0:
        raise ValueError("Intrinsic concentration should be positive")
    elif donor_conc <= intrinsic_conc:
        raise ValueError(
            "Donor concentration should be greater than intrinsic concentration"
        )
    elif acceptor_conc <= intrinsic_conc:
        raise ValueError(
            "Acceptor concentration should be greater than intrinsic concentration"
        )
    else:
        return (
            Boltzmann
            * T
            * log((donor_conc * acceptor_conc) / intrinsic_conc**2)
            / physical_constants["electron volt"][0]
        )
if __name__ == "__main__":
import doctest
doctest.testmod()
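# Worked example (a sketch) with representative room-temperature silicon numbers
# (N_D = N_A = 1e17 cm^-3, n_i = 1.5e10 cm^-3): kT/q ~ 0.0259 V at 300 K, so
# V_bi = 0.0259 * ln(1e34 / 2.25e20) ~ 0.81 V.
# builtin_voltage(donor_conc=1e17, acceptor_conc=1e17, intrinsic_conc=1.5e10)  # ~0.81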
| 119
| 1
|