| code (string, 82–53.2k chars) | code_codestyle (int64, 0–721) | style_context (string, 91–41.9k chars) | style_context_codestyle (int64, 0–699) | label (int64, 0–1) |
|---|---|---|---|---|
class Node:
    def __init__(self, data: int, previous=None, next_node=None):
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self) -> str:
        return f"{self.data}"

    def get_data(self) -> int:
        return self.data

    def get_next(self):
        return self.next

    def get_previous(self):
        return self.previous


class LinkedListIterator:
    def __init__(self, head):
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value


class LinkedList:
    def __init__(self):
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self):
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value: int):
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        return LinkedListIterator(self.head)

    def get_head_data(self):
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node: Node) -> None:
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node: Node) -> None:
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value: int) -> None:
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.next = node
        node_to_insert.previous = node.previous

        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert

        node.previous = node_to_insert

    def insert_after_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.previous = node
        node_to_insert.next = node.next

        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert

        node.next = node_to_insert

    def insert_at_position(self, position: int, value: int) -> None:
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item: int) -> Node:
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("Node not found")

    def delete_value(self, value: int) -> None:
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()

            if node == self.tail:
                self.tail = self.tail.get_previous()

            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node: Node) -> None:
        if node.get_next():
            node.next.previous = node.previous

        if node.get_previous():
            node.previous.next = node.next

        node.next = None
        node.previous = None

    def is_empty(self):
        return self.head is None


def create_linked_list() -> None:
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
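# --- Illustrative usage sketch (added for clarity; not part of the original file). ---
# Exercises the public API defined above; expected output is noted inline.
if __name__ == "__main__":
    demo_list = LinkedList()
    for item in (1, 2, 3):
        demo_list.insert(item)            # builds: 1 2 3
    demo_list.insert_at_position(2, 9)    # inserts before position 2: 1 9 2 3
    print(demo_list)                      # 1 9 2 3
    print(2 in demo_list)                 # True
    demo_list.delete_value(9)
    print(demo_list)                      # 1 2 3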
| 298 |
import argparse
import os

import torch

from diffusers import (
    CMStochasticIterativeScheduler,
    ConsistencyModelPipeline,
    UNet2DModel,
)


TEST_UNET_CONFIG = {
    "sample_size": 32,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 2,
    "num_class_embeds": 1000,
    "block_out_channels": [32, 64],
    "attention_head_dim": 8,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "scale_shift",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}

IMAGENET_64_UNET_CONFIG = {
    "sample_size": 64,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 3,
    "num_class_embeds": 1000,
    "block_out_channels": [192, 192 * 2, 192 * 3, 192 * 4],
    "attention_head_dim": 64,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "scale_shift",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}

LSUN_256_UNET_CONFIG = {
    "sample_size": 256,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 2,
    "num_class_embeds": None,
    "block_out_channels": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
    "attention_head_dim": 64,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "ResnetDownsampleBlock2D",
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
        "ResnetUpsampleBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "default",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}

CD_SCHEDULER_CONFIG = {
    "num_train_timesteps": 40,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}

CT_IMAGENET_64_SCHEDULER_CONFIG = {
    "num_train_timesteps": 201,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}

CT_LSUN_256_SCHEDULER_CONFIG = {
    "num_train_timesteps": 151,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}


def str2bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")


def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]

    if has_skip:
        new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
        new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]

    return new_checkpoint


def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_dim=None):
    weight_q, weight_k, weight_v = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0)

    new_checkpoint[f"{new_prefix}.group_norm.weight"] = checkpoint[f"{old_prefix}.norm.weight"]
    new_checkpoint[f"{new_prefix}.group_norm.bias"] = checkpoint[f"{old_prefix}.norm.bias"]

    new_checkpoint[f"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1).squeeze(-1)

    new_checkpoint[f"{new_prefix}.to_out.0.weight"] = (
        checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f"{new_prefix}.to_out.0.bias"] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1).squeeze(-1)

    return new_checkpoint


def con_pt_to_diffuser(checkpoint_path: str, unet_config):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint["label_emb.weight"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    down_block_types = unet_config["down_block_types"]
    layers_per_block = unet_config["layers_per_block"]
    attention_head_dim = unet_config["attention_head_dim"]
    channels_list = unet_config["block_out_channels"]
    current_layer = 1
    prev_channels = channels_list[0]

    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f"down_blocks.{i}.attentions.{j}"
                old_prefix = f"input_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

        if i != len(down_block_types) - 1:
            new_prefix = f"down_blocks.{i}.downsamplers.0"
            old_prefix = f"input_blocks.{current_layer}.0"
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1

        prev_channels = current_channels

    # hardcoded the mid-block for now
    new_prefix = "mid_block.resnets.0"
    old_prefix = "middle_block.0"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = "mid_block.attentions.0"
    old_prefix = "middle_block.1"
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = "mid_block.resnets.1"
    old_prefix = "middle_block.2"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    current_layer = 0
    up_block_types = unet_config["up_block_types"]

    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1

            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.1"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f"up_blocks.{i}.attentions.{j}"
                old_prefix = f"output_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.2"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    return new_checkpoint


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--unet_path", default=None, type=str, required=True, help="Path to the unet.pt to convert.")
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to output the converted UNet model."
    )
    parser.add_argument("--class_cond", default=True, type=str, help="Whether the model is class-conditional.")

    args = parser.parse_args()
    args.class_cond = str2bool(args.class_cond)

    ckpt_name = os.path.basename(args.unet_path)
    print(f"Checkpoint: {ckpt_name}")

    # Get U-Net config
    if "imagenet64" in ckpt_name:
        unet_config = IMAGENET_64_UNET_CONFIG
    elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        unet_config = LSUN_256_UNET_CONFIG
    elif "test" in ckpt_name:
        unet_config = TEST_UNET_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")

    if not args.class_cond:
        unet_config["num_class_embeds"] = None

    converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)

    image_unet = UNet2DModel(**unet_config)
    image_unet.load_state_dict(converted_unet_ckpt)

    # Get scheduler config
    if "cd" in ckpt_name or "test" in ckpt_name:
        scheduler_config = CD_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
        scheduler_config = CT_IMAGENET_64_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        scheduler_config = CT_LSUN_256_SCHEDULER_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")

    cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config)

    consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
    consistency_model.save_pretrained(args.dump_path)
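# --- Hedged usage sketch (added; not part of the original script). ---
# After conversion, the saved pipeline can be reloaded with the standard diffusers
# API; "<dump_path>" stands for whatever --dump_path was passed above.
#
#   from diffusers import ConsistencyModelPipeline
#
#   pipe = ConsistencyModelPipeline.from_pretrained("<dump_path>")
#   image = pipe(num_inference_steps=1).images[0]  # one-step consistency sampling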
| 298 | 1 |
from typing import Callable, List, Optional, Union

import PIL
import torch
from transformers import (
    CLIPImageProcessor,
    CLIPSegForImageSegmentation,
    CLIPSegProcessor,
    CLIPTextModel,
    CLIPTokenizer,
)

from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class TextInpainting(DiffusionPipeline):
    def __init__(
        self,
        segmentation_model: CLIPSegForImageSegmentation,
        segmentation_processor: CLIPSegProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
                "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)

        if hasattr(scheduler.config, "skip_prk_steps") and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} has not set the configuration"
                " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
                " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
                " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
                " Hub, it would be very nice if you could open a Pull request for the"
                " `scheduler/scheduler_config.json` file"
            )
            deprecate("skip_prk_steps not set", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["skip_prk_steps"] = True
            scheduler._internal_dict = FrozenDict(new_config)

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            segmentation_model=segmentation_model,
            segmentation_processor=segmentation_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    def enable_sequential_cpu_offload(self):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device("cuda")

        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image: Union[torch.FloatTensor, PIL.Image.Image],
        text: str,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        # We use the input text to generate the segmentation mask
        inputs = self.segmentation_processor(
            text=[text], images=[image], padding="max_length", return_tensors="pt"
        ).to(self.device)
        outputs = self.segmentation_model(**inputs)
        mask = torch.sigmoid(outputs.logits).cpu().detach().unsqueeze(-1).numpy()
        mask_pil = self.numpy_to_pil(mask)[0].resize(image.size)

        # Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae,
            text_encoder=self.text_encoder,
            tokenizer=self.tokenizer,
            unet=self.unet,
            scheduler=self.scheduler,
            safety_checker=self.safety_checker,
            feature_extractor=self.feature_extractor,
        )
        return inpainting_pipeline(
            prompt=prompt,
            image=image,
            mask_image=mask_pil,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
        )
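# --- Hedged usage sketch (added; not part of the original pipeline file). ---
# The checkpoint names are illustrative assumptions; any CLIPSeg checkpoint and any
# Stable Diffusion inpainting checkpoint with matching components should work, and
# `init_image` stands for a user-supplied PIL image.
#
#   from diffusers import DiffusionPipeline
#   from transformers import CLIPSegForImageSegmentation, CLIPSegProcessor
#
#   seg_model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")
#   seg_processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
#   pipe = DiffusionPipeline.from_pretrained(
#       "runwayml/stable-diffusion-inpainting",
#       custom_pipeline="text_inpainting",
#       segmentation_model=seg_model,
#       segmentation_processor=seg_processor,
#   )
#   result = pipe(prompt="a cup of coffee", image=init_image, text="a glass cup")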
| 707 | """ GPT-J model configuration"""

from collections import OrderedDict
from typing import Any, List, Mapping, Optional

from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging


logger = logging.get_logger(__name__)

GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
    # See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}


class GPTJConfig(PretrainedConfig):
    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )


class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
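# --- Hedged usage sketch (added; not part of the original module). ---
# `attribute_map` lets generic code read the common names off the GPT-J-specific ones:
#
#   config = GPTJConfig(n_embd=2048, n_layer=16)
#   config.hidden_size         # -> 2048 (aliased to n_embd)
#   config.num_hidden_layers   # -> 16   (aliased to n_layer)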
| 204 | 0 |
"""simple docstring"""
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("env")
    else:
        parser = argparse.ArgumentParser("Accelerate env command")

    parser.add_argument(
        "--config_file", default=None, help="The config file to use for the default values in the launching script."
    )

    if subparsers is not None:
        parser.set_defaults(func=env_command)
    return parser


def env_command(args):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()

    accelerate_config = "Not found"
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()

    info = {
        "`Accelerate` version": version,
        "Platform": platform.platform(),
        "Python version": platform.python_version(),
        "Numpy version": np.__version__,
        "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
        "PyTorch XPU available": str(pt_xpu_available),
        "PyTorch NPU available": str(pt_npu_available),
        "System RAM": f"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
    }
    if pt_cuda_available:
        info["GPU type"] = torch.cuda.get_device_name()

    print("\nCopy-and-paste the text below in your GitHub issue\n")
    print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]))

    print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:")
    accelerate_config_str = (
        "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else f"\t{accelerate_config}"
    )
    print(accelerate_config_str)

    info["`Accelerate` configs"] = accelerate_config

    return info


def main() -> int:
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
    return 0


if __name__ == "__main__":
    raise SystemExit(main())
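# --- Hedged usage note (added; not part of the original file). ---
# This module backs the `accelerate env` subcommand; typical invocations are:
#
#   $ accelerate env
#   $ accelerate env --config_file path/to/config.yaml   # the path is a placeholder
#
# Both print the version/platform table assembled by `env_command` above.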
| 96 |
import gc
import unittest

import numpy as np
import torch

from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import (
    CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
    CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        transformer = Transformer2DModel(
            sample_size=16,
            num_layers=2,
            patch_size=4,
            attention_head_dim=8,
            num_attention_heads=2,
            in_channels=4,
            out_channels=8,
            attention_bias=True,
            activation_fn="gelu-approximate",
            num_embeds_ada_norm=1000,
            norm_type="ada_norm_zero",
            norm_elementwise_affine=False,
        )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "class_labels": [1],
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)


@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dit_256(self):
        generator = torch.manual_seed(0)

        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
        pipe.to("cuda")

        words = ["vase", "umbrella", "white shark", "white wolf"]
        ids = pipe.get_label_ids(words)

        images = pipe(ids, generator=generator, num_inference_steps=40, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-2

    def test_dit_512(self):
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to("cuda")

        words = ["vase", "umbrella"]
        ids = pipe.get_label_ids(words)

        generator = torch.manual_seed(0)
        images = pipe(ids, generator=generator, num_inference_steps=25, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                f"/dit/{word}_512.npy"
            )

            assert np.abs((expected_image - image).max()) < 1e-1
| 599 | 0 |
from __future__ import annotations

import typing
from collections import Counter


def pythagorean_triple(max_perimeter: int) -> typing.Counter[int]:
    triplets: typing.Counter[int] = Counter()
    for base in range(1, max_perimeter + 1):
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets


def solution(max_perimeter: int = 1000) -> int:
    triplets = pythagorean_triple(max_perimeter)
    return triplets.most_common(1)[0][0]


if __name__ == "__main__":
    print(f"Perimeter {solution()} has maximum solutions")
| 700 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool


if is_datasets_available():
    from datasets import load_dataset


class TextToSpeechTool(PipelineTool):
    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechT5Processor
    model_class = SpeechT5ForTextToSpeech
    post_processor_class = SpeechT5HifiGan

    inputs = ["text"]
    outputs = ["audio"]

    def setup(self):
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)

        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")

            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0)

        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
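# --- Hedged usage sketch (added; not part of the original module). ---
# PipelineTool instances are callable; checkpoints are loaded lazily on first use.
# The sampling-rate remark reflects SpeechT5's 16 kHz vocoder output.
#
#   tool = TextToSpeechTool()
#   waveform = tool("Hello, world!")   # 1-D torch tensor with the synthesized audio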
| 683 | 0 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpt2 import GPT2Tokenizer


if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "gpt2": "https://huggingface.co/gpt2/resolve/main/vocab.json",
        "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/vocab.json",
        "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/vocab.json",
        "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/vocab.json",
        "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/vocab.json",
    },
    "merges_file": {
        "gpt2": "https://huggingface.co/gpt2/resolve/main/merges.txt",
        "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/merges.txt",
        "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/merges.txt",
        "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/merges.txt",
        "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "gpt2": "https://huggingface.co/gpt2/resolve/main/tokenizer.json",
        "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json",
        "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/tokenizer.json",
        "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json",
        "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt2": 1024,
    "gpt2-medium": 1024,
    "gpt2-large": 1024,
    "gpt2-xl": 1024,
    "distilgpt2": 1024,
}


class GPT2TokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = GPT2Tokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        self.add_bos_token = kwargs.pop("add_bos_token", False)

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """This corresponds to DialoGPT variants of models."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
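# --- Hedged usage sketch (added; not part of the original module). ---
# A minimal round-trip with the canonical "gpt2" checkpoint:
#
#   tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
#   ids = tokenizer("Hello world")["input_ids"]   # [15496, 995]
#   tokenizer.decode(ids)                         # "Hello world"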
| 145 |
import math
def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    value = factor * value
    first_value_val = value

    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
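# --- Illustrative checks (added; not part of the original file). ---
# is_prime trial-divides by odd numbers up to sqrt(n); next_prime walks upward
# (or downward with desc=True) until it hits a prime.
if __name__ == "__main__":
    assert is_prime(97) is True
    assert is_prime(1) is False
    assert next_prime(14) == 17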
| 145 | 1 |
def print_pascal_triangle(num_rows: int) -> None:
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()


def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0"
        )

    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(
            triangle, current_row, current_row_idx, current_col_idx
        )
    return current_row


def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt


def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0"
        )

    result: list[list[int]] = [[1]]

    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)

    return result


def benchmark() -> None:
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
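# --- Illustrative check (added; not part of the original file). ---
# Both generators agree on the classic rows, e.g.:
#
#   generate_pascal_triangle(3)            # [[1], [1, 1], [1, 2, 1]]
#   generate_pascal_triangle_optimized(3)  # [[1], [1, 1], [1, 2, 1]]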
| 657 |
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sol1 import PokerHand
SORTED_HANDS = (
'''4S 3H 2C 7S 5H''',
'''9D 8H 2C 6S 7H''',
'''2D 6D 9D TH 7D''',
'''TC 8C 2S JH 6C''',
'''JH 8S TH AH QH''',
'''TS KS 5S 9S AC''',
'''KD 6S 9D TH AD''',
'''KS 8D 4D 9S 4S''', # pair
'''8C 4S KH JS 4D''', # pair
'''QH 8H KD JH 8S''', # pair
'''KC 4H KS 2H 8D''', # pair
'''KD 4S KC 3H 8S''', # pair
'''AH 8S AS KC JH''', # pair
'''3H 4C 4H 3S 2H''', # 2 pairs
'''5S 5D 2C KH KH''', # 2 pairs
'''3C KH 5D 5S KH''', # 2 pairs
'''AS 3C KH AD KH''', # 2 pairs
'''7C 7S 3S 7H 5S''', # 3 of a kind
'''7C 7S KH 2H 7H''', # 3 of a kind
'''AC KH QH AH AS''', # 3 of a kind
'''2H 4D 3C AS 5S''', # straight (low ace)
'''3C 5C 4C 2C 6H''', # straight
'''6S 8S 7S 5H 9H''', # straight
'''JS QS 9H TS KH''', # straight
'''QC KH TS JS AH''', # straight (high ace)
'''8C 9C 5C 3C TC''', # flush
'''3S 8S 9S 5S KS''', # flush
'''4C 5C 9C 8C KC''', # flush
'''JH 8H AH KH QH''', # flush
'''3D 2H 3H 2C 2D''', # full house
'''2H 2C 3S 3H 3D''', # full house
'''KH KC 3S 3H 3D''', # full house
'''JC 6H JS JD JH''', # 4 of a kind
'''JC 7H JS JD JH''', # 4 of a kind
'''JC KH JS JD JH''', # 4 of a kind
'''2S AS 4S 5S 3S''', # straight flush (low ace)
'''2D 6D 3D 4D 5D''', # straight flush
'''5C 6C 3C 7C 4C''', # straight flush
'''JH 9H TH KH QH''', # straight flush
'''JH AH TH KH QH''', # royal flush (high ace straight flush)
)
TEST_COMPARE = (
('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''),
('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''),
('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''),
('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''),
('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''),
('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''),
('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''),
('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''),
('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''),
('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''),
('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''),
('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''),
('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''),
('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''),
('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''),
('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''),
('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''),
('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''),
('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''),
('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''),
('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''),
('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''),
('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''),
('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''),
('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''),
('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''),
('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''),
)
TEST_FLUSH = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', True),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', False),
('''AS 3S 4S 8S 2S''', True),
)
TEST_STRAIGHT = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', False),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', True),
)
TEST_FIVE_HIGH_STRAIGHT = (
('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 14]),
('''2H 5D 3C AS 5S''', False, [14, 5, 5, 3, 2]),
('''JH QD KC AS TS''', False, [14, 13, 12, 11, 10]),
('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
('''JH AH TH KH QH''', 0),
('''JH 9H TH KH QH''', 0),
('''JC KH JS JD JH''', 7),
('''KH KC 3S 3H 3D''', 6),
('''8C 9C 5C 3C TC''', 0),
('''JS QS 9H TS KH''', 0),
('''7C 7S KH 2H 7H''', 3),
('''3C KH 5D 5S KH''', 2),
('''QH 8H KD JH 8S''', 1),
('''2D 6D 9D TH 7D''', 0),
)
TEST_TYPES = (
('''JH AH TH KH QH''', 23),
('''JH 9H TH KH QH''', 22),
('''JC KH JS JD JH''', 21),
('''KH KC 3S 3H 3D''', 20),
('''8C 9C 5C 3C TC''', 19),
('''JS QS 9H TS KH''', 18),
('''7C 7S KH 2H 7H''', 17),
('''3C KH 5D 5S KH''', 16),
('''QH 8H KD JH 8S''', 15),
('''2D 6D 9D TH 7D''', 14),
)
def generate_random_hand():
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected


def generate_random_hands(number_of_hands: int = 100):
    return (generate_random_hand() for _ in range(number_of_hands))
@pytest.mark.parametrize("hand, expected", TEST_FLUSH)
def test_hand_is_flush(hand, expected):
    assert PokerHand(hand)._is_flush() == expected


@pytest.mark.parametrize("hand, expected", TEST_STRAIGHT)
def test_hand_is_straight(hand, expected):
    assert PokerHand(hand)._is_straight() == expected


@pytest.mark.parametrize("hand, expected, card_values", TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values):
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values


@pytest.mark.parametrize("hand, expected", TEST_KIND)
def test_hand_is_same_kind(hand, expected):
    assert PokerHand(hand)._is_same_kind() == expected


@pytest.mark.parametrize("hand, expected", TEST_TYPES)
def test_hand_values(hand, expected):
    assert PokerHand(hand)._hand_type == expected


@pytest.mark.parametrize("hand, other, expected", TEST_COMPARE)
def test_compare_simple(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


@pytest.mark.parametrize("hand, other, expected", generate_random_hands())
def test_compare_random(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


def test_hand_sorted():
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]


def test_custom_sort_five_high_straight():
    # Test that five high straights are compared correctly.
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"


def test_multiple_calls_five_high_straight():
    # Multiple calls to five_high_straight function should still return True
    # and shouldn't mutate the list in every call other than the first.
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values


def test_euler_project():
    # Problem number 54 from Project Euler
    # Testing from poker_hands.txt file
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
| 657 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
"""microsoft/trocr-base-handwritten""": (
"""https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"""
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class lowerCamelCase ( __lowerCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = "trocr"
SCREAMING_SNAKE_CASE = ["past_key_values"]
SCREAMING_SNAKE_CASE = {
"num_attention_heads": "decoder_attention_heads",
"hidden_size": "d_model",
"num_hidden_layers": "decoder_layers",
}
    def __init__(self, vocab_size=50265, d_model=1024, decoder_layers=12, decoder_attention_heads=16, decoder_ffn_dim=4096, activation_function="gelu", max_position_embeddings=512, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, decoder_start_token_id=2, init_std=0.02, decoder_layerdrop=0.0, use_cache=True, scale_embedding=False, use_learned_position_embeddings=True, layernorm_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, **kwargs,
        )
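# Usage sketch (illustrative): the attribute_map above aliases the generic
# config names onto the decoder-specific ones, e.g.:
# config = TrOCRConfig(d_model=512, decoder_layers=6)
# assert config.hidden_size == 512 and config.num_hidden_layers == 6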
| 182 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_mbart': ['MBART_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MBartConfig', 'MBartOnnxConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mbart"] = ['MBartTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mbart_fast"] = ['MBartTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mbart"] = [
'MBART_PRETRAINED_MODEL_ARCHIVE_LIST',
'MBartForCausalLM',
'MBartForConditionalGeneration',
'MBartForQuestionAnswering',
'MBartForSequenceClassification',
'MBartModel',
'MBartPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mbart"] = [
'TFMBartForConditionalGeneration',
'TFMBartModel',
'TFMBartPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_mbart"] = [
'FlaxMBartForConditionalGeneration',
'FlaxMBartForQuestionAnswering',
'FlaxMBartForSequenceClassification',
'FlaxMBartModel',
'FlaxMBartPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
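# Behavior sketch (illustrative): once sys.modules[__name__] is swapped for the
# _LazyModule above, an attribute access such as `module.MBartConfig` triggers
# the import of `configuration_mbart` on first use instead of at package import.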
| 253 | 0 |
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/conditional-detr-resnet-50": (
        "https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
    ),
}
class ConditionalDetrConfig(PretrainedConfig):
    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(self, use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=300, encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=8, encoder_layerdrop=0.0, decoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, auxiliary_loss=False, position_embedding_type="sine", backbone="resnet50", use_pretrained_backbone=True, dilation=False, class_cost=2, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, cls_loss_coefficient=2, bbox_loss_coefficient=5, giou_loss_coefficient=2, focal_alpha=0.25, **kwargs):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class ConditionalDetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
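# Usage sketch (illustrative): to_dict() above serializes a nested backbone
# config, so a config round-trips through the dict form:
# cfg = ConditionalDetrConfig(use_timm_backbone=False)
# cfg2 = ConditionalDetrConfig.from_dict(cfg.to_dict())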
| 701 |
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup

url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs("Bangalore"), 1):
print(f"Job {i:>2} is {job[0]} at {job[1]}")
| 106 | 0 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_vivit"] = ["VivitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vivit"] = [
"""VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""VivitModel""",
"""VivitPreTrainedModel""",
"""VivitForVideoClassification""",
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 23 |
"""simple docstring"""
lowercase = 9.80_665
def UpperCAmelCase ( A : float , A : float , A : float = g ):
'''simple docstring'''
if fluid_density <= 0:
raise ValueError('Impossible fluid density' )
if volume < 0:
raise ValueError('Impossible Object volume' )
if gravity <= 0:
raise ValueError('Impossible Gravity' )
return fluid_density * gravity * volume
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
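# Worked example (SI units): a 0.002 m^3 object fully submerged in water
# (density 1000 kg/m^3) experiences
# archimedes_principle(1000, 0.002) = 1000 * 9.80665 * 0.002 ≈ 19.61 N.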
| 573 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
    KandinskyV22Img2ImgPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22Img2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22Img2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler_kwargs = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }
        scheduler = DDIMScheduler(**scheduler_kwargs)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22Img2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_img2img_frog.npy"
        )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)
        pipeline = KandinskyV22Img2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt=""
        ).to_tuple()

        output = pipeline(
            image=init_image, image_embeds=image_emb, negative_image_embeds=zero_image_emb, generator=generator, num_inference_steps=100, height=768, width=768, strength=0.2, output_type="np"
        )
        image = output.images[0]

        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
| 74 |
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 74 | 1 |
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)
    words = input_str.split("_")
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = "" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words])
if __name__ == "__main__":
from doctest import testmod
testmod()
| 427 |
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()
# For specifying empty leaf dict `{}`
empty_dict = object()
def _match(qs, ks):
    """Return True if regexes in qs match any window of strings in tuple ks."""
    # compile regexes and force complete match
    qts = tuple(re.compile(x + "$") for x in qs)
    # loop over len(ks) - len(qs) + 1 matching windows
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False
def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace
def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]
def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
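# Usage sketch (illustrative; assumes a GPT-2-style Flax parameter pytree):
# param_specs = set_partitions(params)
# Every leaf gets a PartitionSpec from the rules above, e.g. the "wte"
# embedding is sharded as P("mp", None) along the vocabulary axis.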
| 427 | 1 |
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/autoformer-tourism-monthly": "https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json",
}
class AutoformerConfig(PretrainedConfig):
    model_type = "autoformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }
def __init__( self , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = "student_t" , lowerCamelCase = "nll" , lowerCamelCase = 1 , lowerCamelCase = [1, 2, 3, 4, 5, 6, 7] , lowerCamelCase = True , lowerCamelCase = 0 , lowerCamelCase = 0 , lowerCamelCase = 0 , lowerCamelCase = 0 , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = 64 , lowerCamelCase = 2 , lowerCamelCase = 2 , lowerCamelCase = 2 , lowerCamelCase = 2 , lowerCamelCase = 32 , lowerCamelCase = 32 , lowerCamelCase = "gelu" , lowerCamelCase = 0.1 , lowerCamelCase = 0.1 , lowerCamelCase = 0.1 , lowerCamelCase = 0.1 , lowerCamelCase = 0.1 , lowerCamelCase = 1_00 , lowerCamelCase = 0.0_2 , lowerCamelCase = True , lowerCamelCase=True , lowerCamelCase = 10 , lowerCamelCase = 25 , lowerCamelCase = 3 , **lowerCamelCase , ):
# time series specific configuration
snake_case__ = prediction_length
snake_case__ = context_length if context_length is not None else prediction_length
snake_case__ = distribution_output
snake_case__ = loss
snake_case__ = input_size
snake_case__ = num_time_features
snake_case__ = lags_sequence
snake_case__ = scaling
snake_case__ = num_dynamic_real_features
snake_case__ = num_static_real_features
snake_case__ = num_static_categorical_features
if cardinality is not None and num_static_categorical_features > 0:
if len(lowerCamelCase ) != num_static_categorical_features:
raise ValueError(
"The cardinality should be a list of the same length as `num_static_categorical_features`" )
snake_case__ = cardinality
else:
snake_case__ = [0]
if embedding_dimension is not None and num_static_categorical_features > 0:
if len(lowerCamelCase ) != num_static_categorical_features:
raise ValueError(
"The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
snake_case__ = embedding_dimension
else:
snake_case__ = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
snake_case__ = num_parallel_samples
# Transformer architecture configuration
snake_case__ = input_size * len(self.lags_sequence ) + self._number_of_features
snake_case__ = d_model
snake_case__ = encoder_attention_heads
snake_case__ = decoder_attention_heads
snake_case__ = encoder_ffn_dim
snake_case__ = decoder_ffn_dim
snake_case__ = encoder_layers
snake_case__ = decoder_layers
snake_case__ = dropout
snake_case__ = attention_dropout
snake_case__ = activation_dropout
snake_case__ = encoder_layerdrop
snake_case__ = decoder_layerdrop
snake_case__ = activation_function
snake_case__ = init_std
snake_case__ = use_cache
# Autoformer
snake_case__ = label_length
snake_case__ = moving_average
snake_case__ = autocorrelation_factor
super().__init__(is_encoder_decoder=lowerCamelCase , **lowerCamelCase )
    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
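# Worked example (illustrative): with the defaults above (input_size=1, a
# 7-element lags_sequence and no extra features), _number_of_features is
# sum([0]) + 0 + 0 + 0 + 1 * 2 = 2, so feature_size = 1 * 7 + 2 = 9.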
| 713 |
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
BITS = 8
def decimal_to_bits(x, bits=BITS):
    """expects image tensor ranging from 0 to 1, outputs bit tensor ranging from -1 to 1"""
    device = x.device

    x = (x * 255).int().clamp(0, 255)

    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b c h w -> b c 1 h w")

    bits = ((x & mask) != 0).float()
    bits = rearrange(bits, "b c d h w -> b (c d) h w")
    bits = bits * 2 - 1
    return bits
def bits_to_decimal(x, bits=BITS):
    """expects bits from -1 to 1, outputs image tensor from 0 to 1"""
    device = x.device

    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)
    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b (c d) h w -> b c d h w", d=8)
    dec = reduce(x * mask, "b c d h w -> b c h w", "sum")
    return (dec / 255).clamp(0.0, 1.0)
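# Round-trip sketch (illustrative): bits_to_decimal inverts decimal_to_bits up
# to the 8-bit quantization for inputs in [0, 1]:
# x = torch.rand(1, 3, 8, 8)
# recovered = bits_to_decimal(decimal_to_bits(x))
# assert torch.allclose(recovered, (x * 255).int() / 255, atol=1 / 255)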
def ddim_bit_scheduler_step(self, model_output, timestep, sample, eta=0.0, use_clipped_model_output=True, generator=None, return_dict=True):
    if self.num_inference_steps is None:
        raise ValueError(
            "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
        )
    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read DDIM paper in-detail understanding
    # Notation (<variable name> -> <name in paper>
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"

    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps

    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
    beta_prod_t = 1 - alpha_prod_t

    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5

    # 4. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep, prev_timestep)
    std_dev_t = eta * variance ** 0.5

    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5

    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output

    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction

    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output) else "cpu"
        noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device)
        variance = self._get_variance(timestep, prev_timestep) ** 0.5 * eta * noise

        prev_sample = prev_sample + variance

    if not return_dict:
        return (prev_sample,)

    return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
def SCREAMING_SNAKE_CASE__ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase="epsilon" , __lowerCAmelCase=None , __lowerCAmelCase = True , ):
snake_case__ = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
snake_case__ , snake_case__ = torch.split(__lowerCAmelCase , sample.shape[1] , dim=1 )
else:
snake_case__ = None
# 1. compute alphas, betas
snake_case__ = self.alphas_cumprod[t]
snake_case__ = self.alphas_cumprod[t - 1] if t > 0 else self.one
snake_case__ = 1 - alpha_prod_t
snake_case__ = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if prediction_type == "epsilon":
snake_case__ = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif prediction_type == "sample":
snake_case__ = model_output
else:
raise ValueError(F"""Unsupported prediction_type {prediction_type}.""" )
# 3. Clip "predicted x_0"
snake_case__ = self.bit_scale
if self.config.clip_sample:
snake_case__ = torch.clamp(__lowerCAmelCase , -scale , __lowerCAmelCase )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
snake_case__ = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
snake_case__ = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
snake_case__ = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
snake_case__ = 0
if t > 0:
snake_case__ = torch.randn(
model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=__lowerCAmelCase ).to(model_output.device )
snake_case__ = (self._get_variance(__lowerCAmelCase , predicted_variance=__lowerCAmelCase ) ** 0.5) * noise
snake_case__ = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return DDPMSchedulerOutput(prev_sample=__lowerCAmelCase , pred_original_sample=__lowerCAmelCase )
class BitDiffusion(DiffusionPipeline):
    def __init__(self, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, DDPMScheduler], bit_scale: Optional[float] = 1.0):
        super().__init__()
        self.bit_scale = bit_scale
        # monkey-patch the scheduler's step with the bit-aware variant above;
        # binding via __get__ (an assumption about the original wiring) makes
        # the function receive the scheduler instance as `self`
        scheduler.step = (
            ddim_bit_scheduler_step.__get__(scheduler)
            if isinstance(scheduler, DDIMScheduler)
            else ddpm_bit_scheduler_step.__get__(scheduler)
        )
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(self, height: Optional[int] = 256, width: Optional[int] = 256, num_inference_steps: Optional[int] = 50, generator: Optional[torch.Generator] = None, batch_size: Optional[int] = 1, output_type: Optional[str] = "pil", return_dict: bool = True, **kwargs) -> Union[Tuple, ImagePipelineOutput]:
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width),
            generator=generator,
        )
        latents = decimal_to_bits(latents) * self.bit_scale
        latents = latents.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # predict the noise residual
            noise_pred = self.unet(latents, t).sample

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents).prev_sample

        image = bits_to_decimal(latents)

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
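# Usage sketch (illustrative, not a documented diffusers API; the unet must be
# trained to denoise 8 * in_channels bit planes):
# pipe = BitDiffusion(unet=unet, scheduler=DDIMScheduler(clip_sample=True), bit_scale=1.0)
# images = pipe(height=64, width=64, num_inference_steps=50).images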
| 530 | 0 |
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
_CITATION = '\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n'
_DESCRIPTION = '\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper "Evaluating Large Language Models Trained on Code"\n(https://arxiv.org/abs/2107.03374).\n'
_KWARGS_DESCRIPTION = '\nCalculates how good are predictions given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. Each candidates should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the canidate programs (Default: 4).\n timeout:\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric("code_eval")\n >>> test_cases = ["assert add(2,3)==5"]\n >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {\'pass@1\': 0.5, \'pass@2\': 1.0}\n'
_WARNING = '\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe "code_eval" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper "Evaluating Large\nLanguage Models Trained on Code" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can to this\nwith:\n\n>>> import os\n>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"\n\n################################################################################\\n'
_LICENSE = 'The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the "Software"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CodeEval(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' ) ),
'references': datasets.Value('string' ),
} ) , homepage='https://github.com/openai/human-eval' , codebase_urls=['https://github.com/openai/human-eval'] , reference_urls=['https://github.com/openai/human-eval'] , license=_LICENSE , )
    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        if os.getenv("HF_ALLOW_CODE_EVAL", 0) != "1":
            raise ValueError(_WARNING)

        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows.")

        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()  # number of completions submitted per task
            n_samples = 0
            results = defaultdict(list)

            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1

            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))

        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["passed"] for r in result]
            total.append(len(result))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)

        ks = k
        pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}

        return pass_at_k, results
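# Worked example (illustrative) of the unbiased pass@k estimator defined below:
# with n=5 samples per task and c=2 correct, pass@1 = 1 - C(3,1)/C(5,1) = 0.4,
# i.e. estimate_pass_at_k(np.array([5]), np.array([2]), 1) -> array([0.4]).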
def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimates pass@k of each problem and returns them in an array."""

    def estimator(n: int, c: int, k: int) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
| 106 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
torch.backends.cuda.matmul.allow_tf32 = False
class VersatileDiffusionPipelineFastTests(unittest.TestCase):
    pass
@nightly
@require_torch_gpu
class VersatileDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )

        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt", image=prompt_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
            pipe.to(torch_device)
            pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt="first prompt", image=prompt_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"
    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt, image=init_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
| 106 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    config = DPTConfig(embedding_type="hybrid")

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = "project"

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 150
        config.patch_size = 16
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name and "backbone" not in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name and "backbone" not in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")
    if "backbone" in name:
        name = name.replace("backbone", "backbone.bit.encoder")
    if ".." in name:
        name = name.replace("..", ".")
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "convolution" in name and "backbone" in name:
        name = name.replace("convolution", "conv")
    if "layer" in name and "backbone" in name:
        name = name.replace("layer", "layers")
    if "backbone.bit.encoder.bit" in name:
        name = name.replace("backbone.bit.encoder.bit", "backbone.bit")
    if "embedder.conv" in name:
        name = name.replace("embedder.conv", "embedder.convolution")
    if "backbone.bit.encoder.stem.norm" in name:
        name = name.replace("backbone.bit.encoder.stem.norm", "backbone.bit.embedder.norm")
    return name
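# Example of the mapping above (illustrative):
# rename_key("pretrained.model.blocks.0.attn.proj.weight")
#   -> "dpt.encoder.layer.0.attention.output.dense.weight"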
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name, show_prediction):
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)

    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")

    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth

    if show_prediction:
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1), size=(image.size[1], image.size[0]), mode="bicubic", align_corners=False,
            )
            .squeeze()
            .cpu()
            .numpy()
        )
        Image.fromarray((prediction / prediction.max()) * 255).show()

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("ybelkada/dpt-hybrid-midas")
        image_processor.push_to_hub("ybelkada/dpt-hybrid-midas")
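# Example invocation (illustrative; the script name and checkpoint path are
# placeholders, and the checkpoint must be a local file because the function
# torch.load()s the --checkpoint_url argument directly):
# python convert_dpt_hybrid_to_pytorch.py --checkpoint_url ./dpt_hybrid.pt \
#     --pytorch_dump_folder_path ./dpt-hybrid-midas --show_prediction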
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
parser.add_argument(
'--show_prediction',
action='store_true',
)
    args = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
| 712 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    'configuration_electra': ['ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ElectraConfig', 'ElectraOnnxConfig'],
    'tokenization_electra': ['ElectraTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ = ['ElectraTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ = [
'ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'ElectraForCausalLM',
'ElectraForMaskedLM',
'ElectraForMultipleChoice',
'ElectraForPreTraining',
'ElectraForQuestionAnswering',
'ElectraForSequenceClassification',
'ElectraForTokenClassification',
'ElectraModel',
'ElectraPreTrainedModel',
'load_tf_weights_in_electra',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ = [
'TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFElectraForMaskedLM',
'TFElectraForMultipleChoice',
'TFElectraForPreTraining',
'TFElectraForQuestionAnswering',
'TFElectraForSequenceClassification',
'TFElectraForTokenClassification',
'TFElectraModel',
'TFElectraPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ = [
'FlaxElectraForCausalLM',
'FlaxElectraForMaskedLM',
'FlaxElectraForMultipleChoice',
'FlaxElectraForPreTraining',
'FlaxElectraForQuestionAnswering',
'FlaxElectraForSequenceClassification',
'FlaxElectraForTokenClassification',
'FlaxElectraModel',
'FlaxElectraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
snake_case_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 388 | 0 |
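The init module above follows the standard optional-dependency pattern: probe each backend, and only register the matching symbols when the probe succeeds. A minimal sketch of the same idea using only the standard library (the module and symbol names below are placeholders, not real packages):
import importlib.util

def backend_available(name: str) -> bool:
    # find_spec returns None when the package cannot be imported
    return importlib.util.find_spec(name) is not None

_import_structure = {"configuration": ["MyConfig"]}
if backend_available("torch"):
    _import_structure["modeling"] = ["MyModel"]
if backend_available("tensorflow"):
    _import_structure["modeling_tf"] = ["TFMyModel"]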
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def lowerCAmelCase_ ( __lowerCamelCase ):
__snake_case , __snake_case : List[Any] = image.size
__snake_case , __snake_case : Tuple = (x - x % 3_2 for x in (w, h)) # resize to integer multiple of 32
__snake_case : str = image.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] )
__snake_case : int = np.array(__lowerCamelCase ).astype(np.floataa ) / 2_5_5.0
__snake_case : Union[str, Any] = image[None].transpose(0 , 3 , 1 , 2 )
__snake_case : Union[str, Any] = torch.from_numpy(__lowerCamelCase )
return 2.0 * image - 1.0
class a (_lowerCAmelCase ):
"""simple docstring"""
def __init__( self : Tuple , lowerCamelCase : VQModel , lowerCamelCase : UNetaDModel , lowerCamelCase : Union[
DDIMScheduler,
PNDMScheduler,
LMSDiscreteScheduler,
EulerDiscreteScheduler,
EulerAncestralDiscreteScheduler,
DPMSolverMultistepScheduler,
] , ) -> Union[str, Any]:
super().__init__()
self.register_modules(vqvae=lowerCamelCase , unet=lowerCamelCase , scheduler=lowerCamelCase )
@torch.no_grad()
def __call__( self : List[str] , lowerCamelCase : Union[torch.Tensor, PIL.Image.Image] = None , lowerCamelCase : Optional[int] = 1 , lowerCamelCase : Optional[int] = 100 , lowerCamelCase : Optional[float] = 0.0 , lowerCamelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCamelCase : Optional[str] = "pil" , lowerCamelCase : bool = True , ) -> Union[Tuple, ImagePipelineOutput]:
if isinstance(lowerCamelCase , PIL.Image.Image ):
__snake_case : Any = 1
elif isinstance(lowerCamelCase , torch.Tensor ):
__snake_case : Any = image.shape[0]
else:
raise ValueError(F'`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(lowerCamelCase )}' )
if isinstance(lowerCamelCase , PIL.Image.Image ):
__snake_case : List[Any] = preprocess(lowerCamelCase )
__snake_case , __snake_case : int = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
__snake_case : str = (batch_size, self.unet.config.in_channels // 2, height, width)
__snake_case : str = next(self.unet.parameters() ).dtype
__snake_case : Tuple = randn_tensor(lowerCamelCase , generator=lowerCamelCase , device=self.device , dtype=lowerCamelCase )
__snake_case : List[Any] = image.to(device=self.device , dtype=lowerCamelCase )
# set timesteps and move to the correct device
self.scheduler.set_timesteps(lowerCamelCase , device=self.device )
__snake_case : str = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
__snake_case : Dict = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__snake_case : Union[str, Any] = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__snake_case : int = {}
if accepts_eta:
__snake_case : List[str] = eta
for t in self.progress_bar(lowerCamelCase ):
# concat latents and low resolution image in the channel dimension.
__snake_case : Union[str, Any] = torch.cat([latents, image] , dim=1 )
__snake_case : Optional[Any] = self.scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
# predict the noise residual
__snake_case : int = self.unet(lowerCamelCase , lowerCamelCase ).sample
# compute the previous noisy sample x_t -> x_t-1
__snake_case : Union[str, Any] = self.scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ).prev_sample
# decode the image latents with the VQVAE
__snake_case : List[Any] = self.vqvae.decode(lowerCamelCase ).sample
__snake_case : Dict = torch.clamp(lowerCamelCase , -1.0 , 1.0 )
__snake_case : Any = image / 2 + 0.5
__snake_case : Tuple = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__snake_case : Tuple = self.numpy_to_pil(lowerCamelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCamelCase )
| 81 |
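The pipeline above decides whether to forward `eta` by inspecting the scheduler's `step` signature rather than hard-coding scheduler types. A standalone sketch of that introspection trick (the two scheduler classes are stand-ins, not real diffusers classes):
import inspect

class DDIMLikeScheduler:
    def step(self, model_output, timestep, sample, eta=0.0):
        return sample

class PNDMLikeScheduler:
    def step(self, model_output, timestep, sample):
        return sample

def extra_step_kwargs(scheduler, eta):
    # mirror the pipeline's check: only pass eta if step() accepts it
    accepts_eta = "eta" in set(inspect.signature(scheduler.step).parameters.keys())
    return {"eta": eta} if accepts_eta else {}

assert extra_step_kwargs(DDIMLikeScheduler(), 0.5) == {"eta": 0.5}
assert extra_step_kwargs(PNDMLikeScheduler(), 0.5) == {}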
'''simple docstring'''
lowercase : int = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def SCREAMING_SNAKE_CASE__ ( __A , __A , __A , __A ) -> Dict:
# Standard BFS from s: return True if the sink t is reachable through edges with remaining capacity.
_snake_case = [False] * len(__A )
_snake_case = [s]
_snake_case = True
while queue:
_snake_case = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if not visited[ind] and graph[u][ind] > 0:
queue.append(__A )
_snake_case = True
_snake_case = u
return visited[t]
def SCREAMING_SNAKE_CASE__ ( __A , __A , __A ) -> Any:
_snake_case = [-1] * (len(__A ))
_snake_case = 0
_snake_case = []
_snake_case = [i[:] for i in graph] # Record original cut, copy.
while bfs(__A , __A , __A , __A ):
_snake_case = float('Inf' )
_snake_case = sink
while s != source:
# Find the minimum value in select path
_snake_case = min(__A , graph[parent[s]][s] )
_snake_case = parent[s]
max_flow += path_flow
_snake_case = sink
while v != source:
_snake_case = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
_snake_case = parent[v]
for i in range(len(__A ) ):
for j in range(len(graph[0] ) ):
if graph[i][j] == 0 and temp[i][j] > 0:
res.append((i, j) )
return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
| 495 | 0 |
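The routine above relies on max-flow/min-cut duality: BFS finds augmenting paths, the bottleneck capacity is pushed along each one, and saturated edges whose original capacity was positive form the cut. For reference, a compact self-contained Edmonds-Karp sketch that returns the maximum flow value under the same adjacency-matrix convention (the 4-node demo graph is an arbitrary example):
from collections import deque

def max_flow(capacity, source, sink):
    n = len(capacity)
    residual = [row[:] for row in capacity]  # work on a copy, keep the input intact
    flow = 0
    while True:
        # BFS for a shortest augmenting path
        parent = [-1] * n
        parent[source] = source
        queue = deque([source])
        while queue and parent[sink] == -1:
            u = queue.popleft()
            for v in range(n):
                if parent[v] == -1 and residual[u][v] > 0:
                    parent[v] = u
                    queue.append(v)
        if parent[sink] == -1:  # no augmenting path left: current flow is maximal
            return flow
        # bottleneck along the found path
        bottleneck = float("inf")
        v = sink
        while v != source:
            bottleneck = min(bottleneck, residual[parent[v]][v])
            v = parent[v]
        # push the flow, updating forward and reverse residual edges
        v = sink
        while v != source:
            residual[parent[v]][v] -= bottleneck
            residual[v][parent[v]] += bottleneck
            v = parent[v]
        flow += bottleneck

demo = [
    [0, 3, 2, 0],
    [0, 0, 5, 2],
    [0, 0, 0, 3],
    [0, 0, 0, 0],
]
assert max_flow(demo, 0, 3) == 5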
"""simple docstring"""
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
a :List[str] = logging.getLogger()
def _lowercase ( ) -> int:
SCREAMING_SNAKE_CASE__ : List[str] = argparse.ArgumentParser()
parser.add_argument("""-f""" )
SCREAMING_SNAKE_CASE__ : str = parser.parse_args()
return args.f
class __a (UpperCamelCase_):
'''simple docstring'''
def _a ( self ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = logging.StreamHandler(sys.stdout )
logger.addHandler(_a )
def _a ( self , _a ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = get_gpu_count()
if n_gpu > 1:
pass
# XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
# script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
# distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
# cmd = [sys.executable] + distributed_args + args
# execute_subprocess_async(cmd, env=self.get_env())
# XXX: test the results - need to save them first into .json file
else:
args.insert(0 , """run_glue_deebert.py""" )
with patch.object(_a , """argv""" , _a ):
SCREAMING_SNAKE_CASE__ : Dict = run_glue_deebert.main()
for value in result.values():
self.assertGreaterEqual(_a , 0.666 )
@slow
@require_torch_non_multi_gpu
def _a ( self ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = """
--model_type roberta
--model_name_or_path roberta-base
--task_name MRPC
--do_train
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--max_seq_length 128
--per_gpu_eval_batch_size=1
--per_gpu_train_batch_size=8
--learning_rate 2e-4
--num_train_epochs 3
--overwrite_output_dir
--seed 42
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--save_steps 0
--overwrite_cache
--eval_after_first_stage
""".split()
self.run_and_check(_a )
SCREAMING_SNAKE_CASE__ : int = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--eval_each_highway
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
self.run_and_check(_a )
SCREAMING_SNAKE_CASE__ : List[str] = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--early_exit_entropy 0.1
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
self.run_and_check(_a )
| 706 |
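The harness above drives a training script in-process by patching `sys.argv` instead of spawning a subprocess. A minimal sketch of that pattern with a stand-in `main` (the function below is illustrative, not the real run_glue_deebert entry point):
import sys
from unittest.mock import patch

def main():
    # stand-in for the script's main(): just echo the CLI arguments it sees
    return sys.argv[1:]

testargs = ["run_glue_deebert.py", "--do_eval", "--max_seq_length", "128"]
with patch.object(sys, "argv", testargs):
    result = main()
assert result == ["--do_eval", "--max_seq_length", "128"]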
"""simple docstring"""
a :List[str] = [
(1_000, "M"),
(900, "CM"),
(500, "D"),
(400, "CD"),
(100, "C"),
(90, "XC"),
(50, "L"),
(40, "XL"),
(10, "X"),
(9, "IX"),
(5, "V"),
(4, "IV"),
(1, "I"),
]
def _lowercase ( __lowerCAmelCase ) -> int:
SCREAMING_SNAKE_CASE__ : Optional[Any] = {"""I""": 1, """V""": 5, """X""": 10, """L""": 50, """C""": 100, """D""": 500, """M""": 1000}
SCREAMING_SNAKE_CASE__ : List[Any] = 0
SCREAMING_SNAKE_CASE__ : List[str] = 0
while place < len(__lowerCAmelCase ):
if (place + 1 < len(__lowerCAmelCase )) and (vals[roman[place]] < vals[roman[place + 1]]):
total += vals[roman[place + 1]] - vals[roman[place]]
place += 2
else:
total += vals[roman[place]]
place += 1
return total
def _lowercase ( __lowerCAmelCase ) -> str:
SCREAMING_SNAKE_CASE__ : Any = []
for arabic, roman in ROMAN:
((SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__)) : List[str] = divmod(__lowerCAmelCase , __lowerCAmelCase )
result.append(roman * factor )
if number == 0:
break
return "".join(__lowerCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 12 | 0 |
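The encoder above is greedy over the subtractive-pair table: repeatedly take the largest value that still fits, via divmod. A worked trace for 1994, mirroring the table from the snippet:
n, out = 1994, ""
for value, symbol in [(1000, "M"), (900, "CM"), (500, "D"), (400, "CD"), (100, "C"),
                      (90, "XC"), (50, "L"), (40, "XL"), (10, "X"), (9, "IX"),
                      (5, "V"), (4, "IV"), (1, "I")]:
    factor, n = divmod(n, value)
    out += symbol * factor
assert out == "MCMXCIV"  # 1000 + 900 + 90 + 4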
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {'''vocab_file''': '''spiece.model'''}
_lowerCAmelCase = {
'''vocab_file''': {
'''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''',
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'''
),
}
}
_lowerCAmelCase = {
'''google/bigbird-roberta-base''': 4096,
'''google/bigbird-roberta-large''': 4096,
'''google/bigbird-base-trivia-itc''': 4096,
}
class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : Optional[int] = VOCAB_FILES_NAMES
__lowercase : int = PRETRAINED_VOCAB_FILES_MAP
__lowercase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase : Union[str, Any] = ['''input_ids''', '''attention_mask''']
__lowercase : List[int] = []
def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase="<unk>" ,__UpperCAmelCase="<s>" ,__UpperCAmelCase="</s>" ,__UpperCAmelCase="<pad>" ,__UpperCAmelCase="[SEP]" ,__UpperCAmelCase="[MASK]" ,__UpperCAmelCase="[CLS]" ,__UpperCAmelCase = None ,**__UpperCAmelCase ,) -> None:
lowerCAmelCase__ : Dict = AddedToken(__UpperCAmelCase ,lstrip=__UpperCAmelCase ,rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase ,__UpperCAmelCase ) else bos_token
lowerCAmelCase__ : Union[str, Any] = AddedToken(__UpperCAmelCase ,lstrip=__UpperCAmelCase ,rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase ,__UpperCAmelCase ) else eos_token
lowerCAmelCase__ : List[Any] = AddedToken(__UpperCAmelCase ,lstrip=__UpperCAmelCase ,rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase ,__UpperCAmelCase ) else unk_token
lowerCAmelCase__ : List[str] = AddedToken(__UpperCAmelCase ,lstrip=__UpperCAmelCase ,rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase ,__UpperCAmelCase ) else pad_token
lowerCAmelCase__ : Union[str, Any] = AddedToken(__UpperCAmelCase ,lstrip=__UpperCAmelCase ,rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase ,__UpperCAmelCase ) else cls_token
lowerCAmelCase__ : Union[str, Any] = AddedToken(__UpperCAmelCase ,lstrip=__UpperCAmelCase ,rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase ,__UpperCAmelCase ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
lowerCAmelCase__ : Dict = AddedToken(__UpperCAmelCase ,lstrip=__UpperCAmelCase ,rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase ,__UpperCAmelCase ) else mask_token
lowerCAmelCase__ : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__UpperCAmelCase ,eos_token=__UpperCAmelCase ,unk_token=__UpperCAmelCase ,pad_token=__UpperCAmelCase ,sep_token=__UpperCAmelCase ,mask_token=__UpperCAmelCase ,cls_token=__UpperCAmelCase ,sp_model_kwargs=self.sp_model_kwargs ,**__UpperCAmelCase ,)
lowerCAmelCase__ : Any = vocab_file
lowerCAmelCase__ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__UpperCAmelCase )
@property
def UpperCAmelCase_ ( self ) -> Tuple:
return self.sp_model.get_piece_size()
def UpperCAmelCase_ ( self ) -> Any:
lowerCAmelCase__ : str = {self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Union[str, Any]:
lowerCAmelCase__ : Union[str, Any] = self.__dict__.copy()
lowerCAmelCase__ : Tuple = None
return state
def __setstate__( self ,__UpperCAmelCase ) -> int:
lowerCAmelCase__ : Any = d
# for backward compatibility
if not hasattr(self ,"""sp_model_kwargs""" ):
lowerCAmelCase__ : Union[str, Any] = {}
lowerCAmelCase__ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> List[str]:
return self.sp_model.encode(__UpperCAmelCase ,out_type=__UpperCAmelCase )
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Tuple:
return self.sp_model.piece_to_id(__UpperCAmelCase )
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Optional[int]:
lowerCAmelCase__ : Union[str, Any] = self.sp_model.IdToPiece(__UpperCAmelCase )
return token
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Union[str, Any]:
lowerCAmelCase__ : Optional[Any] = []
lowerCAmelCase__ : str = """"""
lowerCAmelCase__ : Tuple = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__UpperCAmelCase ) + token
lowerCAmelCase__ : List[Any] = True
lowerCAmelCase__ : List[str] = []
else:
current_sub_tokens.append(__UpperCAmelCase )
lowerCAmelCase__ : Any = False
out_string += self.sp_model.decode(__UpperCAmelCase )
return out_string.strip()
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = False ,__UpperCAmelCase = None ,__UpperCAmelCase = True ,**__UpperCAmelCase ,) -> str:
lowerCAmelCase__ : Optional[int] = kwargs.pop("""use_source_tokenizer""" ,__UpperCAmelCase )
lowerCAmelCase__ : Optional[Any] = self.convert_ids_to_tokens(__UpperCAmelCase ,skip_special_tokens=__UpperCAmelCase )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
lowerCAmelCase__ : Optional[int] = []
lowerCAmelCase__ : Optional[int] = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(__UpperCAmelCase ) )
lowerCAmelCase__ : Optional[int] = []
sub_texts.append(__UpperCAmelCase )
else:
current_sub_text.append(__UpperCAmelCase )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(__UpperCAmelCase ) )
# Mimic the behavior of the Rust tokenizer:
# No space before [MASK] and [SEP]
if spaces_between_special_tokens:
lowerCAmelCase__ : str = re.sub(R""" (\[(MASK|SEP)\])""" ,R"""\1""" ,""" """.join(__UpperCAmelCase ) )
else:
lowerCAmelCase__ : Optional[int] = """""".join(__UpperCAmelCase )
lowerCAmelCase__ : int = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
lowerCAmelCase__ : Union[str, Any] = self.clean_up_tokenization(__UpperCAmelCase )
return clean_text
else:
return text
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ) -> Tuple[str]:
if not os.path.isdir(__UpperCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCAmelCase__ : Optional[int] = os.path.join(
__UpperCAmelCase ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,__UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCAmelCase ,"""wb""" ) as fi:
lowerCAmelCase__ : int = self.sp_model.serialized_model_proto()
fi.write(__UpperCAmelCase )
return (out_vocab_file,)
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCAmelCase__ : Union[str, Any] = [self.cls_token_id]
lowerCAmelCase__ : List[str] = [self.sep_token_id]
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ,__UpperCAmelCase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCAmelCase ,token_ids_a=__UpperCAmelCase ,already_has_special_tokens=__UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(__UpperCAmelCase )) + [1]
return [1] + ([0] * len(__UpperCAmelCase )) + [1] + ([0] * len(__UpperCAmelCase )) + [1]
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ) -> List[int]:
lowerCAmelCase__ : int = [self.sep_token_id]
lowerCAmelCase__ : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
| 565 |
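The mask and type-id helpers above depend only on sequence lengths, so they can be sanity-checked without SentencePiece. A standalone sketch of the single- and pair-sequence layouts (the token ids are arbitrary placeholders):
CLS, SEP = 65, 66  # placeholder ids
a, b = [7, 8, 9], [4, 5]

single = [CLS] + a + [SEP]
pair = [CLS] + a + [SEP] + b + [SEP]

# special-tokens mask: 1 marks an added special token
assert [1] + [0] * len(a) + [1] == [1, 0, 0, 0, 1]
# token type ids: everything up to the first SEP is segment 0, the rest is segment 1
assert len([CLS] + a + [SEP]) * [0] + len(b + [SEP]) * [1] == [0, 0, 0, 0, 0, 1, 1, 1]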
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
_lowerCAmelCase = ''''''
_lowerCAmelCase = ''''''
_lowerCAmelCase = ''''''
_lowerCAmelCase = 1 # (0 is vertical, 1 is horizontal)
def _SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ : int = get_dataset(UpperCamelCase , UpperCamelCase )
print("""Processing...""" )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : str = update_image_and_anno(UpperCamelCase , UpperCamelCase , UpperCamelCase )
for index, image in enumerate(UpperCamelCase ):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
lowerCAmelCase__ : List[Any] = random_chars(32 )
lowerCAmelCase__ : Optional[Any] = paths[index].split(os.sep )[-1].rsplit(""".""" , 1 )[0]
lowerCAmelCase__ : Dict = f"""{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"""
cva.imwrite(f"""{file_root}.jpg""" , UpperCamelCase , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(f"""Success {index+1}/{len(UpperCamelCase )} with {file_name}""" )
lowerCAmelCase__ : Tuple = []
for anno in new_annos[index]:
lowerCAmelCase__ : Union[str, Any] = f"""{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"""
annos_list.append(UpperCamelCase )
with open(f"""{file_root}.txt""" , """w""" ) as outfile:
outfile.write("""\n""".join(line for line in annos_list ) )
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ : str = []
lowerCAmelCase__ : Tuple = []
for label_file in glob.glob(os.path.join(UpperCamelCase , """*.txt""" ) ):
lowerCAmelCase__ : Tuple = label_file.split(os.sep )[-1].rsplit(""".""" , 1 )[0]
with open(UpperCamelCase ) as in_file:
lowerCAmelCase__ : Any = in_file.readlines()
lowerCAmelCase__ : str = os.path.join(UpperCamelCase , f"""{label_name}.jpg""" )
lowerCAmelCase__ : Tuple = []
for obj_list in obj_lists:
lowerCAmelCase__ : Optional[int] = obj_list.rstrip("""\n""" ).split(""" """ )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(UpperCamelCase )
labels.append(UpperCamelCase )
return img_paths, labels
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase = 1 ):
"""simple docstring"""
lowerCAmelCase__ : Tuple = []
lowerCAmelCase__ : Any = []
lowerCAmelCase__ : List[str] = []
for idx in range(len(UpperCamelCase ) ):
lowerCAmelCase__ : Optional[Any] = []
lowerCAmelCase__ : Optional[int] = img_list[idx]
path_list.append(UpperCamelCase )
lowerCAmelCase__ : List[Any] = anno_list[idx]
lowerCAmelCase__ : Dict = cva.imread(UpperCamelCase )
if flip_type == 1:
lowerCAmelCase__ : List[str] = cva.flip(UpperCamelCase , UpperCamelCase )
for bbox in img_annos:
lowerCAmelCase__ : Dict = 1 - bbox[1]
new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
elif flip_type == 0:
lowerCAmelCase__ : Union[str, Any] = cva.flip(UpperCamelCase , UpperCamelCase )
for bbox in img_annos:
lowerCAmelCase__ : Any = 1 - bbox[2]
new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
new_annos_lists.append(UpperCamelCase )
new_imgs_list.append(UpperCamelCase )
return new_imgs_list, new_annos_lists, path_list
def _SCREAMING_SNAKE_CASE ( UpperCamelCase = 32 ):
"""simple docstring"""
assert number_char > 1, "The number of characters should be greater than 1"
lowerCAmelCase__ : Tuple = ascii_lowercase + digits
return "".join(random.choice(UpperCamelCase ) for _ in range(UpperCamelCase ) )
if __name__ == "__main__":
main()
print('''DONE ✅''')
| 565 | 1 |
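For YOLO-style labels the coordinates are normalized to [0, 1], so a horizontal flip only needs x_center -> 1 - x_center (and a vertical flip y_center -> 1 - y_center); width and height are unchanged. Tiny check (the values are exact in binary floating point, so direct equality is safe here):
bbox = [0, 0.25, 0.4, 0.2, 0.1]  # class, x_center, y_center, width, height
flipped_h = [bbox[0], 1 - bbox[1], bbox[2], bbox[3], bbox[4]]
assert flipped_h == [0, 0.75, 0.4, 0.2, 0.1]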
def UpperCamelCase ( _a ) -> str:
'''simple docstring'''
lowercase_ :Dict = ''''''
for ch in key:
if ch == " " or ch not in key_no_dups and ch.isalpha():
key_no_dups += ch
return key_no_dups
def UpperCamelCase ( _a ) -> dict[str, str]:
'''simple docstring'''
lowercase_ :Union[str, Any] = [chr(i + 6_5 ) for i in range(2_6 )]
# Remove duplicate characters from key
lowercase_ :Union[str, Any] = remove_duplicates(key.upper() )
lowercase_ :Any = len(_a )
# First fill cipher with key characters
lowercase_ :List[Any] = {alphabet[i]: char for i, char in enumerate(_a )}
# Then map remaining characters in alphabet to
# the alphabet from the beginning
for i in range(len(_a ) , 2_6 ):
lowercase_ :str = alphabet[i - offset]
# Ensure we are not mapping letters to letters previously mapped
while char in key:
offset -= 1
lowercase_ :Union[str, Any] = alphabet[i - offset]
lowercase_ :str = char
return cipher_alphabet
def UpperCamelCase ( _a , _a ) -> str:
'''simple docstring'''
return "".join(cipher_map.get(_a , _a ) for ch in message.upper() )
def UpperCamelCase ( _a , _a ) -> str:
'''simple docstring'''
lowercase_ :Optional[Any] = {v: k for k, v in cipher_map.items()}
return "".join(rev_cipher_map.get(_a , _a ) for ch in message.upper() )
def UpperCamelCase ( ) -> None:
'''simple docstring'''
lowercase_ :Any = input('''Enter message to encode or decode: ''' ).strip()
lowercase_ :List[Any] = input('''Enter keyword: ''' ).strip()
lowercase_ :Optional[Any] = input('''Encipher or decipher? E/D:''' ).strip()[0].lower()
try:
lowercase_ :List[str] = {'''e''': encipher, '''d''': decipher}[option]
except KeyError:
raise KeyError('''invalid input option''' )
lowercase_ :Optional[int] = create_cipher_map(_a )
print(func(_a , _a ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 441 |
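The cipher above builds a keyed substitution alphabet: deduplicated key letters first, then the remaining letters. A standalone sketch of a simplified keyed-alphabet construction (the mixed-keyword variant above additionally applies an offset rule when wrapping; the key "ZEBRAS" is an arbitrary example):
from string import ascii_uppercase

key = "ZEBRAS"
seen = dict.fromkeys(key)  # preserves order, drops duplicates
rest = [c for c in ascii_uppercase if c not in seen]
cipher_alphabet = list(seen) + rest              # Z E B R A S C D F G ...
cipher_map = dict(zip(ascii_uppercase, cipher_alphabet))

encoded = "".join(cipher_map.get(c, c) for c in "HELLO")
assert encoded == "DAIIL"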
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__)
@add_end_docstrings(lowercase__ )
class UpperCamelCase ( lowercase__ ):
'''simple docstring'''
def __init__( self , *UpperCamelCase_ , **UpperCamelCase_ ):
super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
requires_backends(self , '''decord''' )
self.check_model_type(UpperCamelCase_ )
def UpperCamelCase ( self , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None ):
lowercase_ :int = {}
if frame_sampling_rate is not None:
lowercase_ :int = frame_sampling_rate
if num_frames is not None:
lowercase_ :int = num_frames
lowercase_ :str = {}
if top_k is not None:
lowercase_ :Optional[int] = top_k
return preprocess_params, {}, postprocess_params
def __call__( self , UpperCamelCase_ , **UpperCamelCase_ ):
return super().__call__(UpperCamelCase_ , **UpperCamelCase_ )
def UpperCamelCase ( self , UpperCamelCase_ , UpperCamelCase_=None , UpperCamelCase_=1 ):
if num_frames is None:
lowercase_ :str = self.model.config.num_frames
if video.startswith('''http://''' ) or video.startswith('''https://''' ):
lowercase_ :str = BytesIO(requests.get(UpperCamelCase_ ).content )
lowercase_ :Optional[int] = VideoReader(UpperCamelCase_ )
videoreader.seek(0 )
lowercase_ :Tuple = 0
lowercase_ :Optional[Any] = num_frames * frame_sampling_rate - 1
lowercase_ :Any = np.linspace(UpperCamelCase_ , UpperCamelCase_ , num=UpperCamelCase_ , dtype=np.intaa )
lowercase_ :Dict = videoreader.get_batch(UpperCamelCase_ ).asnumpy()
lowercase_ :List[Any] = list(UpperCamelCase_ )
lowercase_ :Any = self.image_processor(UpperCamelCase_ , return_tensors=self.framework )
return model_inputs
def UpperCamelCase ( self , UpperCamelCase_ ):
lowercase_ :List[str] = self.model(**UpperCamelCase_ )
return model_outputs
def UpperCamelCase ( self , UpperCamelCase_ , UpperCamelCase_=5 ):
if top_k > self.model.config.num_labels:
lowercase_ :List[str] = self.model.config.num_labels
if self.framework == "pt":
lowercase_ :Optional[int] = model_outputs.logits.softmax(-1 )[0]
lowercase_ , lowercase_ :Dict = probs.topk(UpperCamelCase_ )
else:
raise ValueError(f"Unsupported framework: {self.framework}" )
lowercase_ :Dict = scores.tolist()
lowercase_ :Any = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(UpperCamelCase_ , UpperCamelCase_ )]
| 441 | 1 |
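Frame selection in the pipeline above is uniform sampling: `num_frames` indices spread evenly over a window of `num_frames * frame_sampling_rate` frames. A standalone check of that index math with numpy (the integer dtype truncates the evenly spaced floats):
import numpy as np

num_frames, rate = 8, 4
start, end = 0, num_frames * rate - 1  # window covered by the sampled clip
indices = np.linspace(start, end, num=num_frames, dtype=np.int64)
assert indices.tolist() == [0, 4, 8, 13, 17, 22, 26, 31]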
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class _snake_case ( unittest.TestCase ):
def _lowerCamelCase ( self: Union[str, Any] ) -> str:
__UpperCAmelCase : List[Any] = inspect.getfile(accelerate.test_utils )
__UpperCAmelCase : int = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_script.py"] )
__UpperCAmelCase : List[Any] = os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] )
@require_tpu
def _lowerCamelCase ( self: Tuple ) -> Any:
__UpperCAmelCase : Optional[Any] = f'''\n {self.test_dir}/xla_spawn.py\n --num_cores 8\n {self.test_file_path}\n '''.split()
__UpperCAmelCase : Optional[Any] = [sys.executable] + distributed_args
execute_subprocess_async(__lowerCamelCase , env=os.environ.copy() )
| 382 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UpperCAmelCase__ ( A_ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ = KandinskyVaaImgaImgPipeline
UpperCAmelCase_ = ['''image_embeds''', '''negative_image_embeds''', '''image''']
UpperCAmelCase_ = [
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
]
UpperCAmelCase_ = [
'''generator''',
'''height''',
'''width''',
'''strength''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
UpperCAmelCase_ = False
@property
def lowerCAmelCase_ ( self : Tuple ):
"""simple docstring"""
return 32
@property
def lowerCAmelCase_ ( self : Dict ):
"""simple docstring"""
return 32
@property
def lowerCAmelCase_ ( self : List[str] ):
"""simple docstring"""
return self.time_input_dim
@property
def lowerCAmelCase_ ( self : Optional[int] ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def lowerCAmelCase_ ( self : List[str] ):
"""simple docstring"""
return 1_00
@property
def lowerCAmelCase_ ( self : int ):
"""simple docstring"""
torch.manual_seed(0 )
_lowercase : int = {
'''in_channels''': 4,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
_lowercase : List[str] = UNetaDConditionModel(**UpperCamelCase )
return model
@property
def lowerCAmelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowerCAmelCase_ ( self : Tuple ):
"""simple docstring"""
torch.manual_seed(0 )
_lowercase : Tuple = VQModel(**self.dummy_movq_kwargs )
return model
def lowerCAmelCase_ ( self : int ):
"""simple docstring"""
_lowercase : Union[str, Any] = self.dummy_unet
_lowercase : Tuple = self.dummy_movq
_lowercase : Dict = {
'''num_train_timesteps''': 10_00,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.00085,
'''beta_end''': 0.012,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
_lowercase : Dict = DDIMScheduler(**UpperCamelCase )
_lowercase : List[str] = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def lowerCAmelCase_ ( self : Union[str, Any] , UpperCamelCase : List[Any] , UpperCamelCase : List[str]=0 ):
"""simple docstring"""
_lowercase : Tuple = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(UpperCamelCase ) ).to(UpperCamelCase )
_lowercase : List[Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
UpperCamelCase )
# create init_image
_lowercase : List[str] = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCamelCase ) ).to(UpperCamelCase )
_lowercase : Any = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_lowercase : Tuple = Image.fromarray(np.uinta(UpperCamelCase ) ).convert('''RGB''' ).resize((2_56, 2_56) )
if str(UpperCamelCase ).startswith('''mps''' ):
_lowercase : List[Any] = torch.manual_seed(UpperCamelCase )
else:
_lowercase : List[Any] = torch.Generator(device=UpperCamelCase ).manual_seed(UpperCamelCase )
_lowercase : Dict = {
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def lowerCAmelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
_lowercase : str = '''cpu'''
_lowercase : List[Any] = self.get_dummy_components()
_lowercase : List[str] = self.pipeline_class(**UpperCamelCase )
_lowercase : Dict = pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
_lowercase : Dict = pipe(**self.get_dummy_inputs(UpperCamelCase ) )
_lowercase : Optional[Any] = output.images
_lowercase : Optional[int] = pipe(
**self.get_dummy_inputs(UpperCamelCase ) , return_dict=UpperCamelCase , )[0]
_lowercase : str = image[0, -3:, -3:, -1]
_lowercase : str = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_lowercase : List[Any] = np.array(
[0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class UpperCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self : Dict ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase_ ( self : Tuple ):
"""simple docstring"""
_lowercase : Dict = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_img2img_frog.npy''' )
_lowercase : List[str] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
_lowercase : List[str] = '''A red cartoon frog, 4k'''
_lowercase : List[str] = KandinskyVaaPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(UpperCamelCase )
_lowercase : List[str] = KandinskyVaaImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-decoder''' , torch_dtype=torch.floataa )
_lowercase : int = pipeline.to(UpperCamelCase )
pipeline.set_progress_bar_config(disable=UpperCamelCase )
_lowercase : Dict = torch.Generator(device='''cpu''' ).manual_seed(0 )
_lowercase , _lowercase : Optional[Any] = pipe_prior(
UpperCamelCase , generator=UpperCamelCase , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
_lowercase : str = pipeline(
image=UpperCamelCase , image_embeds=UpperCamelCase , negative_image_embeds=UpperCamelCase , generator=UpperCamelCase , num_inference_steps=1_00 , height=7_68 , width=7_68 , strength=0.2 , output_type='''np''' , )
_lowercase : List[str] = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(UpperCamelCase , UpperCamelCase )
| 322 | 0 |
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
__magic_name__ = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
__magic_name__ = [0, 25, 50]
__magic_name__ = [25, 50, 75]
__magic_name__ = fuzz.membership.trimf(X, abca)
__magic_name__ = fuzz.membership.trimf(X, abca)
# Compute the different operations using inbuilt functions.
__magic_name__ = np.ones(75)
__magic_name__ = np.zeros((75,))
# 1. Union = max(µA(x), µB(x))
__magic_name__ = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
__magic_name__ = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
# 3. Complement (A) = (1- min(µA(x))
__magic_name__ = fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x),(1- µB(x)))
__magic_name__ = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
# 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
__magic_name__ = young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
__magic_name__ = young * middle_aged
# 7. Bounded Sum = min[1,(µA(x), µB(x))]
__magic_name__ = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
# 8. Bounded difference = min[0,(µA(x), µB(x))]
__magic_name__ = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('''Young''')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('''Middle aged''')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('''union''')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('''intersection''')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('''complement_a''')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('''difference a/b''')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('''alg_sum''')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('''alg_product''')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('''bdd_sum''')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('''bdd_difference''')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
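# Illustrative sketch (not part of the original script): the elementwise identities the
# plot titles refer to can be checked without skfuzzy — fuzzy OR is max, fuzzy AND is
# min, and complement is 1 - mu:
import numpy as np
mu_a = np.array([0.0, 0.3, 0.7, 1.0])
mu_b = np.array([0.2, 0.2, 0.9, 0.5])
assert np.allclose(np.maximum(mu_a, mu_b), [0.2, 0.3, 0.9, 1.0])  # union
assert np.allclose(np.minimum(mu_a, mu_b), [0.0, 0.2, 0.7, 0.5])  # intersection
assert np.allclose(1 - mu_a, [1.0, 0.7, 0.3, 0.0])                # complement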
| 679 |
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = 42
__UpperCAmelCase = None
# Automatically constructed
__UpperCAmelCase = "dict"
__UpperCAmelCase = None
__UpperCAmelCase = field(default="Translation" , init=UpperCamelCase , repr=UpperCamelCase)
def __call__( self ):
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def lowercase_ ( self ):
from .features import Value
return {k: Value('string' ) for k in sorted(self.languages )}
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = None
# Automatically constructed
__UpperCAmelCase = "dict"
__UpperCAmelCase = None
__UpperCAmelCase = field(default="TranslationVariableLanguages" , init=UpperCamelCase , repr=UpperCamelCase)
def lowercase_ ( self ):
__snake_case : List[str] = sorted(set(self.languages ) ) if self.languages else None
__snake_case : Optional[Any] = len(self.languages ) if self.languages else None
def __call__( self ):
return pa.struct({'language': pa.list_(pa.string() ), 'translation': pa.list_(pa.string() )} )
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : Optional[int] = set(self.languages )
if self.languages and set(_UpperCAmelCase ) - lang_set:
raise ValueError(
F"""Some languages in example ({", ".join(sorted(set(_UpperCAmelCase ) - lang_set ) )}) are not in valid set ({", ".join(_UpperCAmelCase )}).""" )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
__snake_case : Any = []
for lang, text in translation_dict.items():
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
__snake_case , __snake_case : Any = zip(*sorted(_UpperCAmelCase ) )
return {"language": languages, "translation": translations}
def lowercase_ ( self ):
from .features import Sequence, Value
return {
"language": Sequence(Value('string' ) ),
"translation": Sequence(Value('string' ) ),
}
| 679 | 1 |
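The variable-languages feature above flattens a language-to-text dict into two aligned, language-sorted lists, expanding any language that carries multiple translations. A standalone sketch of that flattening step:
example = {"fr": "bonjour", "en": ["hello", "hi"]}
tuples = []
for lang, text in example.items():
    if isinstance(text, str):
        tuples.append((lang, text))
    else:
        tuples.extend((lang, t) for t in text)  # one tuple per translation
languages, translations = zip(*sorted(tuples))
assert languages == ("en", "en", "fr")
assert translations == ("hello", "hi", "bonjour")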
'''simple docstring'''
from math import isqrt, loga
def _UpperCamelCase ( UpperCamelCase__ ):
UpperCAmelCase__ : List[Any] = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase__ : Optional[Any] = False
return [i for i in range(2 , UpperCamelCase__ ) if is_prime[i]]
def _UpperCamelCase ( UpperCamelCase__ = 8_0_0_8_0_0 , UpperCamelCase__ = 8_0_0_8_0_0 ):
UpperCAmelCase__ : Tuple = degree * loga(UpperCamelCase__ )
UpperCAmelCase__ : Optional[int] = int(UpperCamelCase__ )
UpperCAmelCase__ : Any = calculate_prime_numbers(UpperCamelCase__ )
UpperCAmelCase__ : Optional[int] = 0
UpperCAmelCase__ : List[Any] = 0
UpperCAmelCase__ : Optional[Any] = len(UpperCamelCase__ ) - 1
while left < right:
while (
prime_numbers[right] * loga(prime_numbers[left] )
+ prime_numbers[left] * loga(prime_numbers[right] )
> upper_bound
):
right -= 1
hybrid_integers_count += right - left
left += 1
return hybrid_integers_count
if __name__ == "__main__":
print(f"""{solution() = }""") | 407 |
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class _snake_case :
def __init__( self , _lowerCamelCase , _lowerCamelCase=13 , _lowerCamelCase=7 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=99 , _lowerCamelCase=32 , _lowerCamelCase=5 , _lowerCamelCase=4 , _lowerCamelCase=37 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=512 , _lowerCamelCase=16 , _lowerCamelCase=2 , _lowerCamelCase=0.02 , _lowerCamelCase=3 , _lowerCamelCase=4 , _lowerCamelCase=None , ):
UpperCAmelCase__ : Optional[int] = parent
UpperCAmelCase__ : str = batch_size
UpperCAmelCase__ : Tuple = seq_length
UpperCAmelCase__ : Any = is_training
UpperCAmelCase__ : Optional[Any] = use_input_mask
UpperCAmelCase__ : Union[str, Any] = use_token_type_ids
UpperCAmelCase__ : Optional[Any] = use_labels
UpperCAmelCase__ : Tuple = vocab_size
UpperCAmelCase__ : List[Any] = hidden_size
UpperCAmelCase__ : int = num_hidden_layers
UpperCAmelCase__ : List[str] = num_attention_heads
UpperCAmelCase__ : Any = intermediate_size
UpperCAmelCase__ : Optional[Any] = hidden_act
UpperCAmelCase__ : Optional[Any] = hidden_dropout_prob
UpperCAmelCase__ : Dict = attention_probs_dropout_prob
UpperCAmelCase__ : int = max_position_embeddings
UpperCAmelCase__ : int = type_vocab_size
UpperCAmelCase__ : Optional[int] = type_sequence_label_size
UpperCAmelCase__ : Any = initializer_range
UpperCAmelCase__ : Any = num_labels
UpperCAmelCase__ : Optional[int] = num_choices
UpperCAmelCase__ : Dict = scope
def snake_case__ ( self):
UpperCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
UpperCAmelCase__ : Tuple = None
if self.use_input_mask:
UpperCAmelCase__ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length])
UpperCAmelCase__ : Any = None
if self.use_token_type_ids:
UpperCAmelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
UpperCAmelCase__ : List[Any] = None
UpperCAmelCase__ : Union[str, Any] = None
UpperCAmelCase__ : Optional[Any] = None
if self.use_labels:
UpperCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
UpperCAmelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
UpperCAmelCase__ : Any = ids_tensor([self.batch_size] , self.num_choices)
UpperCAmelCase__ : List[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case__ ( self):
return NystromformerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCamelCase , initializer_range=self.initializer_range , )
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase):
UpperCAmelCase__ : Dict = NystromformerModel(config=_lowerCamelCase)
model.to(_lowerCamelCase)
model.eval()
UpperCAmelCase__ : Optional[int] = model(_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase)
UpperCAmelCase__ : Optional[Any] = model(_lowerCamelCase , token_type_ids=_lowerCamelCase)
UpperCAmelCase__ : List[Any] = model(_lowerCamelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase):
UpperCAmelCase__ : str = NystromformerForMaskedLM(config=_lowerCamelCase)
model.to(_lowerCamelCase)
model.eval()
UpperCAmelCase__ : Optional[int] = model(_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase):
UpperCAmelCase__ : List[str] = NystromformerForQuestionAnswering(config=_lowerCamelCase)
model.to(_lowerCamelCase)
model.eval()
UpperCAmelCase__ : Dict = model(
_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , start_positions=_lowerCamelCase , end_positions=_lowerCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase):
UpperCAmelCase__ : List[Any] = self.num_labels
UpperCAmelCase__ : Union[str, Any] = NystromformerForSequenceClassification(_lowerCamelCase)
model.to(_lowerCamelCase)
model.eval()
UpperCAmelCase__ : Any = model(_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase):
UpperCAmelCase__ : List[Any] = self.num_labels
UpperCAmelCase__ : str = NystromformerForTokenClassification(config=_lowerCamelCase)
model.to(_lowerCamelCase)
model.eval()
UpperCAmelCase__ : List[str] = model(_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase):
UpperCAmelCase__ : int = self.num_choices
UpperCAmelCase__ : Any = NystromformerForMultipleChoice(config=_lowerCamelCase)
model.to(_lowerCamelCase)
model.eval()
UpperCAmelCase__ : List[str] = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
UpperCAmelCase__ : List[str] = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
UpperCAmelCase__ : List[str] = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
UpperCAmelCase__ : Union[str, Any] = model(
_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def snake_case__ ( self):
UpperCAmelCase__ : Optional[int] = self.prepare_config_and_inputs()
(
(
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) ,
) : List[Any] = config_and_inputs
UpperCAmelCase__ : List[str] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class _snake_case ( a__ , a__ , unittest.TestCase ):
lowerCAmelCase :int = (
(
NystromformerModel,
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCAmelCase :List[str] = (
{
'''feature-extraction''': NystromformerModel,
'''fill-mask''': NystromformerForMaskedLM,
'''question-answering''': NystromformerForQuestionAnswering,
'''text-classification''': NystromformerForSequenceClassification,
'''token-classification''': NystromformerForTokenClassification,
'''zero-shot''': NystromformerForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase :int = False
lowerCAmelCase :List[str] = False
def snake_case__ ( self):
UpperCAmelCase__ : str = NystromformerModelTester(self)
UpperCAmelCase__ : Optional[Any] = ConfigTester(self , config_class=_lowerCamelCase , hidden_size=37)
def snake_case__ ( self):
self.config_tester.run_common_tests()
def snake_case__ ( self):
UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase)
def snake_case__ ( self):
UpperCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase__ : Dict = type
self.model_tester.create_and_check_model(*_lowerCamelCase)
def snake_case__ ( self):
UpperCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_lowerCamelCase)
def snake_case__ ( self):
UpperCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_lowerCamelCase)
def snake_case__ ( self):
UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_lowerCamelCase)
def snake_case__ ( self):
UpperCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_lowerCamelCase)
def snake_case__ ( self):
UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_lowerCamelCase)
@slow
def snake_case__ ( self):
for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : Union[str, Any] = NystromformerModel.from_pretrained(_lowerCamelCase)
self.assertIsNotNone(_lowerCamelCase)
@require_torch
class _snake_case ( unittest.TestCase ):
@slow
def snake_case__ ( self):
UpperCAmelCase__ : int = NystromformerModel.from_pretrained("""uw-madison/nystromformer-512""")
UpperCAmelCase__ : Dict = torch.tensor([[0, 1, 2, 3, 4, 5]])
with torch.no_grad():
UpperCAmelCase__ : Optional[int] = model(_lowerCamelCase)[0]
UpperCAmelCase__ : List[str] = torch.Size((1, 6, 768))
self.assertEqual(output.shape , _lowerCamelCase)
UpperCAmelCase__ : Union[str, Any] = torch.tensor(
[[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , _lowerCamelCase , atol=1e-4))
@slow
def snake_case__ ( self):
UpperCAmelCase__ : Optional[int] = """the [MASK] of Belgium is Brussels"""
UpperCAmelCase__ : Tuple = AutoTokenizer.from_pretrained("""uw-madison/nystromformer-512""")
UpperCAmelCase__ : Optional[int] = NystromformerForMaskedLM.from_pretrained("""uw-madison/nystromformer-512""")
UpperCAmelCase__ : Any = tokenizer(_lowerCamelCase , return_tensors="""pt""")
with torch.no_grad():
UpperCAmelCase__ : Union[str, Any] = model(encoding.input_ids).logits
UpperCAmelCase__ : Tuple = token_logits[:, 2, :].argmax(-1)[0]
self.assertEqual(tokenizer.decode(_lowerCamelCase) , """capital""")
| 407 | 1 |
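The masked-LM check above decodes the prediction by taking an argmax over the vocabulary at the [MASK] position. A self-contained sketch of that decoding step with a toy vocabulary (no pretrained model involved; the logits are fabricated for the example):
import torch

vocab = ["the", "capital", "of", "belgium", "[MASK]"]
# fake logits: batch of 1, sequence of 3 positions, vocab of 5;
# position 2 strongly predicts token id 1 ("capital")
logits = torch.tensor([[[0.1, 0.0, 0.0, 0.0, 0.0],
                        [0.0, 0.0, 0.0, 0.0, 0.0],
                        [0.0, 9.0, 0.0, 0.0, 0.0]]])
predicted_id = logits[:, 2, :].argmax(-1)[0]
assert vocab[int(predicted_id)] == "capital"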
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue_model_parallelism.py',
'model_name_or_path': 'roberta-large',
'instance_type': 'ml.p3dn.24xlarge',
'results': {'train_runtime': 1_600, 'eval_accuracy': 0.3, 'eval_loss': 1.2},
},
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'roberta-large',
'instance_type': 'ml.p3dn.24xlarge',
'results': {'train_runtime': 1_600, 'eval_accuracy': 0.3, 'eval_loss': 1.2},
},
] )
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        # configuration for running training on smdistributed Model Parallel
        mpi_options = {
            "enabled": True,
            "processes_per_host": 8,
        }
        smp_options = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }
        distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}
        name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            },
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(1,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from the SageMaker job; this includes starting, preprocessing, and stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)
        # dump test results into a JSON file to share in the PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile) | 638 | import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
snake_case__ = logging.get_logger(__name__)
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
def __init__( self , *A_ , **A_ ) -> None:
"""simple docstring"""
warnings.warn(
'''The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use DPTImageProcessor instead.''' , FutureWarning , )
super().__init__(*A_ , **A_ ) | 638 | 1 |
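The shim above is the standard deprecation pattern: the old class subclasses its replacement and only adds a warning. A minimal sketch of the same idea, using hypothetical placeholder names (OldExtractor and NewProcessor are not real transformers classes):

import warnings


class NewProcessor:
    def __init__(self, *args, **kwargs):
        self.args, self.kwargs = args, kwargs


class OldExtractor(NewProcessor):
    def __init__(self, *args, **kwargs):
        # warn once at construction time, then behave exactly like the new class
        warnings.warn(
            "The class OldExtractor is deprecated. Please use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)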
'''simple docstring'''
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class lowerCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        example_video_filepath = hf_hub_download(
            repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset"
        )
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
        examples = [
            example_video_filepath,
            "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
        ]
        return video_classifier, examples

    def run_pipeline_test(self, video_classifier, examples):
        for example in examples:
            outputs = video_classifier(example)
            self.assertEqual(
                outputs,
                [
                    {"score": ANY(float), "label": ANY(str)},
                    {"score": ANY(float), "label": ANY(str)},
                ],
            )

    @require_torch
    def test_small_model_pt(self):
        small_model = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10}
        )
        video_classifier = pipeline(
            "video-classification", model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4
        )
        video_file_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
        outputs = video_classifier(video_file_path, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
        )

        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ],
            top_k=2,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
            ],
        )
@require_tf
def lowerCAmelCase_ ( self : List[Any] ):
pass
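A minimal usage sketch of the pipeline these tests exercise, assuming the decord video backend is installed and the tiny test checkpoint and demo video are reachable:

from huggingface_hub import hf_hub_download
from transformers import pipeline

video_classifier = pipeline(
    "video-classification",
    model="hf-internal-testing/tiny-random-VideoMAEForVideoClassification",
)
video_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
print(video_classifier(video_path, top_k=2))  # e.g. [{"score": ..., "label": "LABEL_0"}, ...]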
| 331 |
'''simple docstring'''
from __future__ import annotations
def prime_sieve(limit):
    """Sieve of Eratosthenes: return every prime strictly below `limit`."""
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)
    return primes


def solution(ceiling=1_00_00_00):
    """Project Euler 50: the prime below `ceiling` expressible as the longest sum of consecutive primes."""
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0
    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol
    return largest


if __name__ == "__main__":
    print(F"{solution() = }")
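A few sanity checks for the helpers above (41 = 2 + 3 + 5 + 7 + 11 + 13 is the classic consecutive-prime-sum example from the problem statement); these lines are a sketch, not part of the original file:

assert prime_sieve(10) == [2, 3, 5, 7]
assert 41 in prime_sieve(100)
print(solution(100))  # 41, the prime below 100 with the longest consecutive-prime sum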
| 331 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
logging.set_verbosity_info()
_snake_case : List[Any] = logging.get_logger(__name__)
def snake_case_ (UpperCamelCase : Any , UpperCamelCase : str , UpperCamelCase : str ):
'''simple docstring'''
_a = WavaVecaForSequenceClassification.from_pretrained(SCREAMING_SNAKE_CASE_ , config=SCREAMING_SNAKE_CASE_ )
_a = downstream_dict["projector.weight"]
_a = downstream_dict["projector.bias"]
_a = downstream_dict["model.post_net.linear.weight"]
_a = downstream_dict["model.post_net.linear.bias"]
return model
def snake_case_ (UpperCamelCase : Tuple , UpperCamelCase : Optional[Any] , UpperCamelCase : List[Any] ):
'''simple docstring'''
_a = WavaVecaForAudioFrameClassification.from_pretrained(SCREAMING_SNAKE_CASE_ , config=SCREAMING_SNAKE_CASE_ )
_a = downstream_dict["model.linear.weight"]
_a = downstream_dict["model.linear.bias"]
return model
def snake_case_ (UpperCamelCase : List[Any] , UpperCamelCase : int , UpperCamelCase : int ):
'''simple docstring'''
_a = WavaVecaForXVector.from_pretrained(SCREAMING_SNAKE_CASE_ , config=SCREAMING_SNAKE_CASE_ )
_a = downstream_dict["connector.weight"]
_a = downstream_dict["connector.bias"]
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
_a = downstream_dict[
f'model.framelevel_feature_extractor.module.{i}.kernel.weight'
]
_a = downstream_dict[f'model.framelevel_feature_extractor.module.{i}.kernel.bias']
_a = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
_a = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
_a = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
_a = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
_a = downstream_dict["objective.W"]
return model
@torch.no_grad()
def snake_case_ (UpperCamelCase : Optional[int] , UpperCamelCase : int , UpperCamelCase : Union[str, Any] , UpperCamelCase : Dict ):
'''simple docstring'''
_a = torch.load(SCREAMING_SNAKE_CASE_ , map_location='''cpu''' )
_a = checkpoint["Downstream"]
_a = WavaVecaConfig.from_pretrained(SCREAMING_SNAKE_CASE_ )
_a = WavaVecaFeatureExtractor.from_pretrained(
SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , do_normalize=SCREAMING_SNAKE_CASE_ )
_a = hf_config.architectures[0]
if arch.endswith('''ForSequenceClassification''' ):
_a = convert_classification(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
elif arch.endswith('''ForAudioFrameClassification''' ):
_a = convert_diarization(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
elif arch.endswith('''ForXVector''' ):
_a = convert_xvector(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
else:
raise NotImplementedError(f'S3PRL weights conversion is not supported for {arch}' )
if hf_config.use_weighted_layer_sum:
_a = checkpoint["Featurizer"]["weights"]
hf_feature_extractor.save_pretrained(SCREAMING_SNAKE_CASE_ )
hf_model.save_pretrained(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
_snake_case : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
_snake_case : List[str] = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 702 |
'''simple docstring'''
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
    import torch

    _torch_available = True
except ImportError:
    _torch_available = False
try:
    from torch.hub import _get_torch_home

    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
    )
default_cache_path = os.path.join(torch_cache_home, "transformers")
CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co"
S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert"
PATH = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
CONFIG = os.path.join(PATH, "config.yaml")
ATTRIBUTES = os.path.join(PATH, "attributes.txt")
OBJECTS = os.path.join(PATH, "objects.txt")
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = "pytorch_model.bin"
CONFIG_NAME = "config.yaml"
def load_labels(objs=OBJECTS, attrs=ATTRIBUTES):
    vg_classes = []
    with open(objs) as f:
        for object in f.readlines():
            vg_classes.append(object.split(",")[0].lower().strip())
    vg_attrs = []
    with open(attrs) as f:
        for object in f.readlines():
            vg_attrs.append(object.split(",")[0].lower().strip())
    return vg_classes, vg_attrs
def load_ckpt(ckpt):
    r = OrderedDict()
    with open(ckpt, "rb") as f:
        ckp = pkl.load(f)["model"]
    for k in copy.deepcopy(list(ckp.keys())):
        v = ckp.pop(k)
        if isinstance(v, np.ndarray):
            v = torch.tensor(v)
        else:
            assert isinstance(v, torch.Tensor), type(v)
        r[k] = v
    return r
class A :
lowercase_ = {}
def __init__( self : List[Any] , lowerCAmelCase_ : dict , lowerCAmelCase_ : str = "root" , lowerCAmelCase_ : List[str]=0 ) -> int:
"""simple docstring"""
_a = name
_a = level
_a = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
_a = copy.deepcopy(lowerCAmelCase_ )
_a = copy.deepcopy(lowerCAmelCase_ )
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_a = Config(lowerCAmelCase_ , name=lowerCAmelCase_ , level=level + 1 )
_a = v
setattr(self , lowerCAmelCase_ , lowerCAmelCase_ )
_a = d
def __repr__( self : Union[str, Any] ) -> str:
"""simple docstring"""
return str(list((self._pointer.keys()) ) )
def __setattr__( self : Dict , lowerCAmelCase_ : Any , lowerCAmelCase_ : Any ) -> List[Any]:
"""simple docstring"""
_a = val
_a = val
_a = key.split('''.''' )
_a = len(lowerCAmelCase_ ) - 1
_a = self._pointer
if len(lowerCAmelCase_ ) > 1:
for i, l in enumerate(lowerCAmelCase_ ):
if hasattr(self , lowerCAmelCase_ ) and isinstance(getattr(self , lowerCAmelCase_ ) , lowerCAmelCase_ ):
setattr(getattr(self , lowerCAmelCase_ ) , '''.'''.join(levels[i:] ) , lowerCAmelCase_ )
if l == last_level:
_a = val
else:
_a = pointer[l]
def __lowerCAmelCase ( self : str ) -> Dict:
"""simple docstring"""
return self._pointer
def __lowerCAmelCase ( self : Dict , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Dict ) -> int:
"""simple docstring"""
with open(F'{file_name}' , '''w''' ) as stream:
dump(lowerCAmelCase_ , lowerCAmelCase_ )
def __lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : Tuple ) -> Optional[Any]:
"""simple docstring"""
with open(F'{file_name}' , '''w''' ) as stream:
json.dump(lowerCAmelCase_ , lowerCAmelCase_ )
@staticmethod
def __lowerCAmelCase ( lowerCAmelCase_ : List[str] ) -> List[Any]:
"""simple docstring"""
with open(lowerCAmelCase_ ) as stream:
_a = load(lowerCAmelCase_ , Loader=lowerCAmelCase_ )
return data
def __str__( self : Optional[Any] ) -> int:
"""simple docstring"""
_a = ''' '''
if self._name != "root":
_a = F'{t * (self._level-1)}{self._name}:\n'
else:
_a = ''''''
_a = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
r += F'{t * (self._level)}{v}\n'
self._level += 1
else:
r += F'{t * (self._level)}{k}: {v} ({type(lowerCAmelCase_ ).__name__})\n'
_a = level
return r[:-1]
@classmethod
def __lowerCAmelCase ( cls : Dict , lowerCAmelCase_ : str , **lowerCAmelCase_ : Dict ) -> str:
"""simple docstring"""
_a , _a = cls.get_config_dict(lowerCAmelCase_ , **lowerCAmelCase_ )
return cls(lowerCAmelCase_ )
@classmethod
def __lowerCAmelCase ( cls : Tuple , lowerCAmelCase_ : str , **lowerCAmelCase_ : Optional[Any] ) -> Any:
"""simple docstring"""
_a = kwargs.pop('''cache_dir''' , lowerCAmelCase_ )
_a = kwargs.pop('''force_download''' , lowerCAmelCase_ )
_a = kwargs.pop('''resume_download''' , lowerCAmelCase_ )
_a = kwargs.pop('''proxies''' , lowerCAmelCase_ )
_a = kwargs.pop('''local_files_only''' , lowerCAmelCase_ )
if os.path.isdir(lowerCAmelCase_ ):
_a = os.path.join(lowerCAmelCase_ , lowerCAmelCase_ )
elif os.path.isfile(lowerCAmelCase_ ) or is_remote_url(lowerCAmelCase_ ):
_a = pretrained_model_name_or_path
else:
_a = hf_bucket_url(lowerCAmelCase_ , filename=lowerCAmelCase_ , use_cdn=lowerCAmelCase_ )
try:
# Load from URL or cache if already cached
_a = cached_path(
lowerCAmelCase_ , cache_dir=lowerCAmelCase_ , force_download=lowerCAmelCase_ , proxies=lowerCAmelCase_ , resume_download=lowerCAmelCase_ , local_files_only=lowerCAmelCase_ , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
_a = Config.load_yaml(lowerCAmelCase_ )
except EnvironmentError:
_a = '''Can\'t load config for'''
raise EnvironmentError(lowerCAmelCase_ )
if resolved_config_file == config_file:
print('''loading configuration file from path''' )
else:
print('''loading configuration file cache''' )
return Config.load_yaml(lowerCAmelCase_ ), kwargs
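A sketch of the intended behavior of the class above (its own classmethods refer to it as Config, although the class name is anonymised to `A` in this dump, so treat this as illustrative rather than directly executable against the text as printed):

cfg = Config({"model": {"hidden_size": 768, "layers": 12}, "name": "demo"})
print(cfg.model.hidden_size)  # 768 - nested dicts become attribute paths
cfg.model.layers = 24         # attribute writes update the underlying pointer dict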
def compare(in_tensor):
'''simple docstring'''
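    # Debugging helper: loads a reference tensor from "dump.pt", prints both shapes
    # and leading elements, checks them with np.allclose, and then raises on purpose
    # so a conversion run halts right after the comparison.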
    out_tensor = torch.load("dump.pt", map_location=in_tensor.device)
    n1 = in_tensor.numpy()
    n2 = out_tensor.numpy()[0]
    print(n1.shape, n1[0, 0, :5])
    print(n2.shape, n2[0, 0, :5])
    assert np.allclose(n1, n2, rtol=0.01, atol=0.1), (
        f"{sum([1 for x in np.isclose(n1, n2, rtol=0.01, atol=0.1).flatten() if x is False])/len(n1.flatten())*100:.4f} %"
        " element-wise mismatch"
    )
    raise Exception("tensors are all good")
# Hugging face functions below
def is_remote_url(url_or_filename):
    parsed = urlparse(url_or_filename)
    return parsed.scheme in ("http", "https")
def hf_bucket_url(model_id, filename, use_cdn=True):
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        return f"{endpoint}/{model_id}-{filename}"
    else:
        return f"{endpoint}/{model_id}/{filename}"
def http_get(url, temp_file, proxies=None, resume_size=0, user_agent=None):
    ua = "python/{}".format(sys.version.split()[0])
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__)
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    headers = {"user-agent": ua}
    if resume_size > 0:
        headers["Range"] = "bytes=%d-" % (resume_size,)
    response = requests.get(url, stream=True, proxies=proxies, headers=headers)
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get("Content-Length")
    total = resume_size + int(content_length) if content_length is not None else None
    progress = tqdm(
        unit="B", unit_scale=True, total=total, initial=resume_size, desc="Downloading",
    )
    for chunk in response.iter_content(chunk_size=1024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
def snake_case_ (UpperCamelCase : int , UpperCamelCase : Optional[Any]=None , UpperCamelCase : Optional[int]=False , UpperCamelCase : Optional[int]=None , UpperCamelCase : Any=10 , UpperCamelCase : int=False , UpperCamelCase : int=None , UpperCamelCase : Any=False , ):
'''simple docstring'''
if cache_dir is None:
_a = TRANSFORMERS_CACHE
if isinstance(UpperCamelCase , UpperCamelCase ):
_a = str(UpperCamelCase )
os.makedirs(UpperCamelCase , exist_ok=UpperCamelCase )
_a = None
if not local_files_only:
try:
_a = requests.head(UpperCamelCase , allow_redirects=UpperCamelCase , proxies=UpperCamelCase , timeout=UpperCamelCase )
if response.status_code == 200:
_a = response.headers.get('''ETag''' )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
_a = url_to_filename(UpperCamelCase , UpperCamelCase )
# get cache path to put the file
_a = os.path.join(UpperCamelCase , UpperCamelCase )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(UpperCamelCase ):
return cache_path
else:
_a = [
file
for file in fnmatch.filter(os.listdir(UpperCamelCase ) , filename + '''.*''' )
if not file.endswith('''.json''' ) and not file.endswith('''.lock''' )
]
if len(UpperCamelCase ) > 0:
return os.path.join(UpperCamelCase , matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
'''Cannot find the requested files in the cached path and outgoing traffic has been'''
''' disabled. To enable model look-ups and downloads online, set \'local_files_only\''''
''' to False.''' )
return None
# From now on, etag is not None.
if os.path.exists(UpperCamelCase ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
_a = cache_path + '''.lock'''
with FileLock(UpperCamelCase ):
# If the download just completed while the lock was activated.
if os.path.exists(UpperCamelCase ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
_a = cache_path + '''.incomplete'''
@contextmanager
def _resumable_file_manager():
with open(UpperCamelCase , '''a+b''' ) as f:
yield f
_a = _resumable_file_manager
if os.path.exists(UpperCamelCase ):
_a = os.stat(UpperCamelCase ).st_size
else:
_a = 0
else:
_a = partial(tempfile.NamedTemporaryFile , dir=UpperCamelCase , delete=UpperCamelCase )
_a = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
"%s not found in cache or force_download set to True, downloading to %s" % (UpperCamelCase , temp_file.name) )
http_get(
UpperCamelCase , UpperCamelCase , proxies=UpperCamelCase , resume_size=UpperCamelCase , user_agent=UpperCamelCase , )
os.replace(temp_file.name , UpperCamelCase )
_a = {'''url''': url, '''etag''': etag}
_a = cache_path + '''.json'''
with open(UpperCamelCase , '''w''' ) as meta_file:
json.dump(UpperCamelCase , UpperCamelCase )
return cache_path
def url_to_filename(url, etag=None):
    url_bytes = url.encode("utf-8")
    url_hash = sha256(url_bytes)
    filename = url_hash.hexdigest()
    if etag:
        etag_bytes = etag.encode("utf-8")
        etag_hash = sha256(etag_bytes)
        filename += "." + etag_hash.hexdigest()
    if url.endswith(".h5"):
        filename += ".h5"
    return filename
def snake_case_ (UpperCamelCase : Any , UpperCamelCase : Dict=None , UpperCamelCase : Tuple=False , UpperCamelCase : List[Any]=None , UpperCamelCase : Union[str, Any]=False , UpperCamelCase : List[Any]=None , UpperCamelCase : Dict=False , UpperCamelCase : List[str]=False , UpperCamelCase : Tuple=False , ):
'''simple docstring'''
if cache_dir is None:
_a = TRANSFORMERS_CACHE
if isinstance(UpperCamelCase , UpperCamelCase ):
_a = str(UpperCamelCase )
if isinstance(UpperCamelCase , UpperCamelCase ):
_a = str(UpperCamelCase )
if is_remote_url(UpperCamelCase ):
# URL, so get it from the cache (downloading if necessary)
_a = get_from_cache(
UpperCamelCase , cache_dir=UpperCamelCase , force_download=UpperCamelCase , proxies=UpperCamelCase , resume_download=UpperCamelCase , user_agent=UpperCamelCase , local_files_only=UpperCamelCase , )
elif os.path.exists(UpperCamelCase ):
# File, and it exists.
_a = url_or_filename
elif urlparse(UpperCamelCase ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError('''file {} not found'''.format(UpperCamelCase ) )
else:
# Something unknown
raise ValueError('''unable to parse {} as a URL or as a local path'''.format(UpperCamelCase ) )
if extract_compressed_file:
if not is_zipfile(UpperCamelCase ) and not tarfile.is_tarfile(UpperCamelCase ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
_a , _a = os.path.split(UpperCamelCase )
_a = output_file.replace('''.''' , '''-''' ) + '''-extracted'''
_a = os.path.join(UpperCamelCase , UpperCamelCase )
if os.path.isdir(UpperCamelCase ) and os.listdir(UpperCamelCase ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
_a = output_path + '''.lock'''
with FileLock(UpperCamelCase ):
shutil.rmtree(UpperCamelCase , ignore_errors=UpperCamelCase )
os.makedirs(UpperCamelCase )
if is_zipfile(UpperCamelCase ):
with ZipFile(UpperCamelCase , '''r''' ) as zip_file:
zip_file.extractall(UpperCamelCase )
zip_file.close()
elif tarfile.is_tarfile(UpperCamelCase ):
_a = tarfile.open(UpperCamelCase )
tar_file.extractall(UpperCamelCase )
tar_file.close()
else:
raise EnvironmentError('''Archive format of {} could not be identified'''.format(UpperCamelCase ) )
return output_path_extracted
return output_path
def get_data(query, delim=","):
    assert isinstance(query, str)
    if os.path.isfile(query):
        with open(query) as f:
            data = eval(f.read())
    else:
        req = requests.get(query)
        try:
            data = req.json()
        except Exception:
            data = req.content.decode()
        assert data is not None, "could not connect"
        try:
            data = eval(data)
        except Exception:
            data = data.split("\n")
        req.close()
    return data
def get_image_from_url(url):
    response = requests.get(url)
    img = np.array(Image.open(BytesIO(response.content)))
    return img
def load_frcnn_pkl_from_url(url):
    fn = url.split("/")[-1]
    if fn not in os.listdir(os.getcwd()):
        wget.download(url)
    with open(fn, "rb") as stream:
        weights = pkl.load(stream)
    model = weights.pop("model")
    new = {}
    for k, v in model.items():
        new[k] = torch.from_numpy(v)
        if "running_var" in k:
            zero = torch.tensor([0])
            k2 = k.replace("running_var", "num_batches_tracked")
            new[k2] = zero
    return new
def snake_case_ ():
'''simple docstring'''
print(f'{os.path.abspath(os.path.join(PATH , os.pardir ) )}/demo.ipynb' )
def img_tensorize(im, input_format="RGB"):
    assert isinstance(im, str)
    if os.path.isfile(im):
        img = cv2.imread(im)
    else:
        img = get_image_from_url(im)
        assert img is not None, f"could not connect to: {im}"
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    if input_format == "RGB":
        img = img[:, :, ::-1]
    return img
def chunk(images, batch=1):
    return (images[i : i + batch] for i in range(0, len(images), batch))
| 377 | 0 |
"""simple docstring"""
import math
def jump_search(arr, x):
    """Search a sorted list for x in O(sqrt(n)) by jumping in sqrt(n)-sized blocks."""
    n = len(arr)
    step = int(math.floor(math.sqrt(n)))
    prev = 0
    # jump block by block until we reach a block whose last element could contain x
    while arr[min(step, n) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1
    # linear scan inside the candidate block
    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step, n):
            return -1
    if arr[prev] == x:
        return prev
    return -1


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    x = int(input("Enter the number to be searched:\n"))
    res = jump_search(arr, x)
    if res == -1:
        print("Number not found!")
    else:
        print(f"Number {x} is at index {res}")
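A small non-interactive sanity sketch for jump_search (not part of the original file):

fib = [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610]
assert jump_search(fib, 55) == 10  # found at index 10
assert jump_search(fib, 4) == -1   # absent values return -1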
| 110 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
class a ( lowercase ):
    model_type = "bert-generation"

    def __init__(
        self,
        vocab_size=50_358,
        hidden_size=1_024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4_096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=1,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
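A minimal usage sketch for the configuration above (BertGenerationConfig is the public name transformers exports for this class):

from transformers import BertGenerationConfig

config = BertGenerationConfig(hidden_size=256, num_hidden_layers=4, num_attention_heads=4)
print(config.hidden_size, config.position_embedding_type)  # 256 absolute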
| 110 | 1 |
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class _UpperCAmelCase ( lowercase , unittest.TestCase ):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    space_between_special_tokens = True
def _snake_case ( self : Tuple):
super().setUp()
vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"こんにちは",
"こん",
"にちは",
"ばんは",
"##こん",
"##にちは",
"##ばんは",
"世界",
"##世界",
"、",
"##、",
"。",
"##。",
]
self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こんにちは 、 世界 。 こんばんは 、 世界 。"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
def _snake_case ( self : List[Any]):
pass # TODO add if relevant
def _snake_case ( self : Any):
pass # TODO add if relevant
def _snake_case ( self : Union[str, Any]):
pass # TODO add if relevant
def _snake_case ( self : Tuple):
SCREAMING_SNAKE_CASE_ :Tuple = self.tokenizer_class(self.vocab_file)
SCREAMING_SNAKE_CASE_ :Optional[int] = tokenizer.tokenize("こんにちは、世界。\nこんばんは、世界。")
self.assertListEqual(UpperCAmelCase , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase) , [3, 12, 10, 14, 4, 9, 12, 10, 14])
def _snake_case ( self : Tuple):
SCREAMING_SNAKE_CASE_ :int = self.tokenizer_class(self.vocab_file , word_tokenizer_type="mecab")
self.assertIsNotNone(UpperCAmelCase)
SCREAMING_SNAKE_CASE_ :Optional[int] = "こんにちは、世界。\nこんばんは、世界。"
SCREAMING_SNAKE_CASE_ :Optional[int] = tokenizer.tokenize(UpperCAmelCase)
self.assertListEqual(UpperCAmelCase , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase) , [3, 12, 10, 14, 4, 9, 12, 10, 14])
SCREAMING_SNAKE_CASE_ :Any = os.path.join(self.tmpdirname , "tokenizer.bin")
with open(UpperCAmelCase , "wb") as handle:
pickle.dump(UpperCAmelCase , UpperCAmelCase)
with open(UpperCAmelCase , "rb") as handle:
SCREAMING_SNAKE_CASE_ :Optional[int] = pickle.load(UpperCAmelCase)
SCREAMING_SNAKE_CASE_ :int = tokenizer_new.tokenize(UpperCAmelCase)
self.assertListEqual(UpperCAmelCase , UpperCAmelCase)
def _snake_case ( self : List[Any]):
SCREAMING_SNAKE_CASE_ :str = MecabTokenizer(mecab_dic="ipadic")
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 ") , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def _snake_case ( self : Any):
try:
SCREAMING_SNAKE_CASE_ :List[Any] = MecabTokenizer(mecab_dic="unidic_lite")
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 ") , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def _snake_case ( self : Union[str, Any]):
try:
SCREAMING_SNAKE_CASE_ :List[str] = MecabTokenizer(mecab_dic="unidic")
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 ") , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def _snake_case ( self : Union[str, Any]):
SCREAMING_SNAKE_CASE_ :str = MecabTokenizer(do_lower_case=UpperCAmelCase , mecab_dic="ipadic")
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 ") , ["アップルストア", "で", "iphone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def _snake_case ( self : int):
try:
SCREAMING_SNAKE_CASE_ :Optional[int] = MecabTokenizer(
do_lower_case=UpperCAmelCase , normalize_text=UpperCAmelCase , mecab_option="-d /usr/local/lib/mecab/dic/jumandic")
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 ") , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "\u3000", "。"] , )
def _snake_case ( self : Union[str, Any]):
SCREAMING_SNAKE_CASE_ :Union[str, Any] = MecabTokenizer(normalize_text=UpperCAmelCase , mecab_dic="ipadic")
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 ") , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", " ", "。"] , )
@require_sudachi
def _snake_case ( self : Tuple):
SCREAMING_SNAKE_CASE_ :Union[str, Any] = self.tokenizer_class(self.vocab_file , word_tokenizer_type="sudachi")
self.assertIsNotNone(UpperCAmelCase)
SCREAMING_SNAKE_CASE_ :Dict = "こんにちは、世界。\nこんばんは、世界。"
SCREAMING_SNAKE_CASE_ :int = tokenizer.tokenize(UpperCAmelCase)
self.assertListEqual(UpperCAmelCase , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase) , [3, 12, 10, 14, 4, 9, 12, 10, 14])
SCREAMING_SNAKE_CASE_ :Optional[int] = os.path.join(self.tmpdirname , "tokenizer.bin")
with open(UpperCAmelCase , "wb") as handle:
pickle.dump(UpperCAmelCase , UpperCAmelCase)
with open(UpperCAmelCase , "rb") as handle:
SCREAMING_SNAKE_CASE_ :Tuple = pickle.load(UpperCAmelCase)
SCREAMING_SNAKE_CASE_ :Optional[Any] = tokenizer_new.tokenize(UpperCAmelCase)
self.assertListEqual(UpperCAmelCase , UpperCAmelCase)
@require_sudachi
def _snake_case ( self : Any):
SCREAMING_SNAKE_CASE_ :Union[str, Any] = SudachiTokenizer(sudachi_dict_type="core")
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 ") , [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "] , )
@require_sudachi
def _snake_case ( self : Optional[int]):
SCREAMING_SNAKE_CASE_ :Any = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="A")
self.assertListEqual(tokenizer.tokenize("外国人参政権") , ["外国", "人", "参政", "権"])
@require_sudachi
def _snake_case ( self : List[str]):
SCREAMING_SNAKE_CASE_ :Optional[int] = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="B")
self.assertListEqual(tokenizer.tokenize("外国人参政権") , ["外国人", "参政権"])
@require_sudachi
def _snake_case ( self : Tuple):
SCREAMING_SNAKE_CASE_ :List[str] = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="C")
self.assertListEqual(tokenizer.tokenize("外国人参政権") , ["外国人参政権"])
@require_sudachi
def _snake_case ( self : Optional[Any]):
SCREAMING_SNAKE_CASE_ :List[str] = SudachiTokenizer(do_lower_case=UpperCAmelCase , sudachi_dict_type="core")
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 ") , [" ", "\t", "アップル", "ストア", "で", "iphone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "] , )
@require_sudachi
def _snake_case ( self : Any):
SCREAMING_SNAKE_CASE_ :Union[str, Any] = SudachiTokenizer(normalize_text=UpperCAmelCase , sudachi_dict_type="core")
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 ") , [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", "\u3000", "。", " ", " "] , )
@require_sudachi
def _snake_case ( self : Any):
SCREAMING_SNAKE_CASE_ :int = SudachiTokenizer(trim_whitespace=UpperCAmelCase , sudachi_dict_type="core")
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 ") , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
@require_jumanpp
def _snake_case ( self : Dict):
SCREAMING_SNAKE_CASE_ :Any = self.tokenizer_class(self.vocab_file , word_tokenizer_type="jumanpp")
self.assertIsNotNone(UpperCAmelCase)
SCREAMING_SNAKE_CASE_ :Optional[Any] = "こんにちは、世界。\nこんばんは、世界。"
SCREAMING_SNAKE_CASE_ :Dict = tokenizer.tokenize(UpperCAmelCase)
self.assertListEqual(UpperCAmelCase , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase) , [3, 12, 10, 14, 4, 9, 12, 10, 14])
SCREAMING_SNAKE_CASE_ :List[str] = os.path.join(self.tmpdirname , "tokenizer.bin")
with open(UpperCAmelCase , "wb") as handle:
pickle.dump(UpperCAmelCase , UpperCAmelCase)
with open(UpperCAmelCase , "rb") as handle:
SCREAMING_SNAKE_CASE_ :int = pickle.load(UpperCAmelCase)
SCREAMING_SNAKE_CASE_ :List[str] = tokenizer_new.tokenize(UpperCAmelCase)
self.assertListEqual(UpperCAmelCase , UpperCAmelCase)
@require_jumanpp
def _snake_case ( self : Any):
SCREAMING_SNAKE_CASE_ :Union[str, Any] = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 ") , ["アップル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )
@require_jumanpp
def _snake_case ( self : int):
SCREAMING_SNAKE_CASE_ :Tuple = JumanppTokenizer(do_lower_case=UpperCAmelCase)
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 ") , ["アップル", "ストア", "で", "iphone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )
@require_jumanpp
def _snake_case ( self : List[str]):
SCREAMING_SNAKE_CASE_ :List[Any] = JumanppTokenizer(normalize_text=UpperCAmelCase)
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 ") , ["ア", "ッ", "フ", "゚", "ル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )
@require_jumanpp
def _snake_case ( self : Tuple):
SCREAMING_SNAKE_CASE_ :Any = JumanppTokenizer(trim_whitespace=UpperCAmelCase)
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 ") , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "。"] , )
@require_jumanpp
def _snake_case ( self : int):
SCREAMING_SNAKE_CASE_ :str = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize("ありがとうございますm(_ _)m見つけるのが大変です。") , ["ありがとう", "ございます", "m(_ _)m", "見つける", "の", "が", "大変です", "。"] , )
def _snake_case ( self : int):
vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こんにちは", "こん", "にちは", "ばんは", "##こん", "##にちは", "##ばんは"]
vocab = {}
for i, token in enumerate(vocab_tokens):
    vocab[token] = i
tokenizer = WordpieceTokenizer(vocab=vocab , unk_token="[UNK]")
self.assertListEqual(tokenizer.tokenize("") , [])
self.assertListEqual(tokenizer.tokenize("こんにちは") , ["こんにちは"])
self.assertListEqual(tokenizer.tokenize("こんばんは") , ["こん", "##ばんは"])
self.assertListEqual(tokenizer.tokenize("こんばんは こんばんにちは こんにちは") , ["こん", "##ばんは", "[UNK]", "こんにちは"])
def _snake_case ( self : Tuple):
SCREAMING_SNAKE_CASE_ :List[str] = BertJapaneseTokenizer.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp")
SCREAMING_SNAKE_CASE_ :Union[str, Any] = tokenizer.subword_tokenizer
SCREAMING_SNAKE_CASE_ :Union[str, Any] = subword_tokenizer.tokenize("国境 の 長い トンネル を 抜ける と 雪国 であった 。")
self.assertListEqual(UpperCAmelCase , ["▁国境", "▁の", "▁長い", "▁トンネル", "▁を", "▁抜ける", "▁と", "▁雪", "国", "▁であった", "▁。"])
SCREAMING_SNAKE_CASE_ :str = subword_tokenizer.tokenize("こんばんは こんばん にち は こんにちは")
self.assertListEqual(UpperCAmelCase , ["▁こん", "ばん", "は", "▁こん", "ばん", "▁に", "ち", "▁は", "▁こんにちは"])
def _snake_case ( self : Dict):
SCREAMING_SNAKE_CASE_ :Optional[Any] = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese")
SCREAMING_SNAKE_CASE_ :Union[str, Any] = tokenizer.encode("ありがとう。" , add_special_tokens=UpperCAmelCase)
SCREAMING_SNAKE_CASE_ :int = tokenizer.encode("どういたしまして。" , add_special_tokens=UpperCAmelCase)
SCREAMING_SNAKE_CASE_ :Union[str, Any] = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase)
SCREAMING_SNAKE_CASE_ :Tuple = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase , UpperCAmelCase)
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class _UpperCAmelCase ( lowercase , unittest.TestCase ):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
def _snake_case ( self : int):
super().setUp()
vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]
self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_tokenizer(self, **kwargs):
        return BertJapaneseTokenizer.from_pretrained(self.tmpdirname, subword_tokenizer_type="character", **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"
        return input_text, output_text
def _snake_case ( self : List[Any]):
pass # TODO add if relevant
def _snake_case ( self : Tuple):
pass # TODO add if relevant
def _snake_case ( self : Union[str, Any]):
pass # TODO add if relevant
def _snake_case ( self : int):
SCREAMING_SNAKE_CASE_ :Tuple = self.tokenizer_class(self.vocab_file , subword_tokenizer_type="character")
SCREAMING_SNAKE_CASE_ :int = tokenizer.tokenize("こんにちは、世界。 \nこんばんは、世界。")
self.assertListEqual(
UpperCAmelCase , ["こ", "ん", "に", "ち", "は", "、", "世", "界", "。", "こ", "ん", "ば", "ん", "は", "、", "世", "界", "。"])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCAmelCase) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12])
def _snake_case ( self : Dict):
vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]
vocab = {}
for i, token in enumerate(vocab_tokens):
    vocab[token] = i
tokenizer = CharacterTokenizer(vocab=vocab , unk_token="[UNK]")
self.assertListEqual(tokenizer.tokenize("") , [])
self.assertListEqual(tokenizer.tokenize("こんにちは") , ["こ", "ん", "に", "ち", "は"])
self.assertListEqual(tokenizer.tokenize("こんにちほ") , ["こ", "ん", "に", "ち", "[UNK]"])
def _snake_case ( self : int):
SCREAMING_SNAKE_CASE_ :List[Any] = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese-char")
SCREAMING_SNAKE_CASE_ :Dict = tokenizer.encode("ありがとう。" , add_special_tokens=UpperCAmelCase)
SCREAMING_SNAKE_CASE_ :Optional[int] = tokenizer.encode("どういたしまして。" , add_special_tokens=UpperCAmelCase)
SCREAMING_SNAKE_CASE_ :List[Any] = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase)
SCREAMING_SNAKE_CASE_ :Dict = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase , UpperCAmelCase)
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class _UpperCAmelCase ( unittest.TestCase ):
def _snake_case ( self : str):
SCREAMING_SNAKE_CASE_ :Any = "cl-tohoku/bert-base-japanese"
SCREAMING_SNAKE_CASE_ :str = AutoTokenizer.from_pretrained(UpperCAmelCase)
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase)
class _UpperCAmelCase ( unittest.TestCase ):
def _snake_case ( self : List[Any]):
SCREAMING_SNAKE_CASE_ :List[Any] = "cl-tohoku/bert-base-japanese"
with self.assertLogs("transformers" , level="WARNING") as cm:
BertTokenizer.from_pretrained(UpperCAmelCase)
self.assertTrue(
cm.records[0].message.startswith(
"The tokenizer class you load from this checkpoint is not the same type as the class this function"
" is called from."))
SCREAMING_SNAKE_CASE_ :Tuple = "bert-base-cased"
with self.assertLogs("transformers" , level="WARNING") as cm:
BertJapaneseTokenizer.from_pretrained(UpperCAmelCase)
self.assertTrue(
cm.records[0].message.startswith(
"The tokenizer class you load from this checkpoint is not the same type as the class this function"
" is called from."))
| 140 |
from collections import defaultdict
class AssignmentUsingBitmask:
    def __init__(self, task_performed, total):
        self.total_tasks = total  # total no of tasks (N)
        # DP table will have a dimension of (2^M)*N
        # initially all values are set to -1
        self.dp = [
            [-1 for i in range(total + 1)] for j in range(2 ** len(task_performed))
        ]
        self.task = defaultdict(list)  # stores the list of persons for each task
        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        self.final_mask = (1 << len(task_performed)) - 1

    def count_ways_until(self, mask, task_no):
        # if mask == self.final_mask, all persons have been assigned a task; return 1
        if mask == self.final_mask:
            return 1
        # if not everyone got a task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0
        # if this case was already computed
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]
        # number of ways when we don't assign this task in the arrangement
        total_ways_util = self.count_ways_until(mask, task_no + 1)
        # now assign the task one by one to every eligible person and recursively
        # assign the remaining tasks.
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p was already given a task
                if mask & (1 << p):
                    continue
                # assign this task to p, set p's bit in the mask, and recurse
                # with the new mask value.
                total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1)
        # save the value.
        self.dp[mask][task_no] = total_ways_util
        return self.dp[mask][task_no]

    def count_no_of_ways(self, task_performed):
        # store the list of persons for each task
        for i in range(len(task_performed)):
            for j in task_performed[i]:
                self.task[j].append(i)
        # fill the DP table; the final answer is stored in dp[0][1]
        return self.count_ways_until(0, 1)


if __name__ == "__main__":
    total_tasks = 5  # total no of tasks (the value of N)
    # the list of tasks that can be done by M persons.
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
    print(
        AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
            task_performed
        )
    )
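A second configuration for the solver, as a sketch (note the API quirk: the task list is passed both to the constructor and to count_no_of_ways):

# person 0 can do tasks 1 or 2, person 1 only task 2, person 2 tasks 3 or 4
solver = AssignmentUsingBitmask([[1, 2], [2], [3, 4]], 4)
print(solver.count_no_of_ways([[1, 2], [2], [3, 4]]))  # 2 (p0->1, p1->2, p2->3 or 4)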
| 140 | 1 |
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator, batch_size=16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize Accelerator
    # New Code #
    # We pass in "all" to `log_with` to grab all available trackers in the environment
    # Note: If using a custom `Tracker` class, should be passed in here such as:
    # >>> log_with = ["all", MyCustomTrackerClassInstance()]
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    set_seed(seed)

    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # New Code #
    # We need to initialize the trackers we use. Overall configurations can also be stored
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        # New Code #
        # For our tracking example, we will log the total loss of each epoch
        if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            # New Code #
            if args.with_tracking:
                total_loss += loss.detach().float()
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True` (the default).
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # To actually log, we call `Accelerator.log`
        # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": eval_metric["accuracy"],
                    "f1": eval_metric["f1"],
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=epoch,
            )

    # New Code #
    # When a run is finished, you should call `accelerator.end_training()`
    # to close all of the open trackers
    if args.with_tracking:
        accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose "
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 "
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location where to store experiment tracking logs and relevant project information",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
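# Hedged usage note (assumptions: the standard `accelerate` CLI and that this
# file is saved as tracking.py). For `init_trackers`/`log`/`end_training` above
# to record anything, the `Accelerator` must be constructed with logging
# enabled earlier in the script, e.g.
# `Accelerator(log_with="all", project_dir=args.project_dir)`, and the script
# launched with:
#
#   accelerate launch tracking.py --with_tracking --project_dir ./logs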
| 664 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"google/pix2struct-textcaps-base": (
"https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
),
}
class Pix2StructTextConfig(PretrainedConfig):
    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
def __init__( self , a=5_02_44 , a=7_68 , a=64 , a=20_48 , a=12 , a=12 , a=32 , a=1_28 , a=0.1 , a=1e-6 , a=1.0 , a="gelu_new" , a=0 , a=False , a=0 , a=1 , a=False , a=True , **a , ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = d_kv
_UpperCamelCase = d_ff
_UpperCamelCase = num_layers
_UpperCamelCase = num_heads
_UpperCamelCase = relative_attention_num_buckets
_UpperCamelCase = relative_attention_max_distance
_UpperCamelCase = dropout_rate
_UpperCamelCase = layer_norm_epsilon
_UpperCamelCase = initializer_factor
_UpperCamelCase = use_cache
_UpperCamelCase = eos_token_id
_UpperCamelCase = decoder_start_token_id
# for backwards compatibility
_UpperCamelCase = dense_act_fn
super().__init__(
pad_token_id=a , eos_token_id=a , decoder_start_token_id=a , tie_word_embeddings=a , is_decoder=a , **a , )
@classmethod
def A_ ( cls , a , **a ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(a )
_UpperCamelCase , _UpperCamelCase = cls.get_config_dict(a , **a )
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get("""model_type""" ) == "pix2struct":
_UpperCamelCase = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(a , **a )
class Pix2StructVisionConfig(PretrainedConfig):
    model_type = "pix2struct_vision_model"
def __init__( self , a=7_68 , a=7_68 , a=20_48 , a=64 , a=12 , a=12 , a="gelu_new" , a=1e-6 , a=0.0 , a=0.0 , a=1e-10 , a=1.0 , a=40_96 , a=32 , a=1_28 , **a , ) -> Tuple:
'''simple docstring'''
super().__init__(**a )
_UpperCamelCase = hidden_size
_UpperCamelCase = patch_embed_hidden_size
_UpperCamelCase = d_ff
_UpperCamelCase = dropout_rate
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = initializer_range
_UpperCamelCase = initializer_factor
_UpperCamelCase = attention_dropout
_UpperCamelCase = layer_norm_eps
_UpperCamelCase = dense_act_fn
_UpperCamelCase = seq_len
_UpperCamelCase = relative_attention_num_buckets
_UpperCamelCase = relative_attention_max_distance
_UpperCamelCase = d_kv
@classmethod
def A_ ( cls , a , **a ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(a )
_UpperCamelCase , _UpperCamelCase = cls.get_config_dict(a , **a )
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get("""model_type""" ) == "pix2struct":
_UpperCamelCase = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(a , **a )
class Pix2StructConfig(PretrainedConfig):
    model_type = "pix2struct"
    is_composition = True
def __init__( self , a=None , a=None , a=1.0 , a=0.02 , a=False , a=False , a=True , **a , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(tie_word_embeddings=a , is_encoder_decoder=a , **a )
if text_config is None:
_UpperCamelCase = {}
logger.info("""text_config is None. Initializing the Pix2StructTextConfig with default values.""" )
if vision_config is None:
_UpperCamelCase = {}
logger.info("""vision_config is None. Initializing the Pix2StructVisionConfig with default values.""" )
        _UpperCamelCase = Pix2StructTextConfig(**a )
        _UpperCamelCase = Pix2StructVisionConfig(**a )
_UpperCamelCase = self.text_config.decoder_start_token_id
_UpperCamelCase = self.text_config.pad_token_id
_UpperCamelCase = self.text_config.eos_token_id
_UpperCamelCase = initializer_factor
_UpperCamelCase = initializer_range
_UpperCamelCase = self.initializer_range
_UpperCamelCase = self.initializer_range
_UpperCamelCase = is_vqa
@classmethod
def A_ ( cls , a , a , **a ) -> str:
'''simple docstring'''
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **a )
def A_ ( self ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = copy.deepcopy(self.__dict__ )
_UpperCamelCase = self.text_config.to_dict()
_UpperCamelCase = self.vision_config.to_dict()
_UpperCamelCase = self.__class__.model_type
return output
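# Hedged usage sketch, assuming the public `transformers` names this module
# defines (the composite config is assembled from its two sub-configs via the
# classmethod above; the hyperparameter values are illustrative only):
def _compose_config_demo():
    text_config = Pix2StructTextConfig(num_layers=2)
    vision_config = Pix2StructVisionConfig(num_hidden_layers=2)
    config = Pix2StructConfig.from_text_vision_configs(text_config, vision_config)
    # The composite round-trips through a plain dict, as implemented by `to_dict` above.
    assert config.to_dict()["text_config"]["num_layers"] == 2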
| 612 | 0 |
'''simple docstring'''
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = '''ResNetConfig'''
# Base docstring
_CHECKPOINT_FOR_DOC = '''microsoft/resnet-50'''
_EXPECTED_OUTPUT_SHAPE = [1, 2_0_4_8, 7, 7]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = '''microsoft/resnet-50'''
_IMAGE_CLASS_EXPECTED_OUTPUT = '''tiger cat'''
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    '''microsoft/resnet-50''',
    # See all resnet models at https://huggingface.co/models?filter=resnet
]
class ResNetConvLayer(nn.Module):
'''simple docstring'''
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 3 , __lowerCAmelCase = 1 , __lowerCAmelCase = "relu"):
"""simple docstring"""
super().__init__()
        lowerCAmelCase = nn.Conv2d(
            __lowerCAmelCase , __lowerCAmelCase , kernel_size=__lowerCAmelCase , stride=__lowerCAmelCase , padding=kernel_size // 2 , bias=__lowerCAmelCase)
        lowerCAmelCase = nn.BatchNorm2d(__lowerCAmelCase)
        lowerCAmelCase = ACT2FN[activation] if activation is not None else nn.Identity()
def a_ ( self , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = self.convolution(__lowerCAmelCase)
lowerCAmelCase = self.normalization(__lowerCAmelCase)
lowerCAmelCase = self.activation(__lowerCAmelCase)
return hidden_state
class ResNetEmbeddings(nn.Module):
'''simple docstring'''
def __init__( self , __lowerCAmelCase):
"""simple docstring"""
super().__init__()
lowerCAmelCase = ResNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act)
        lowerCAmelCase = nn.MaxPool2d(kernel_size=3 , stride=2 , padding=1)
lowerCAmelCase = config.num_channels
def a_ ( self , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
"""Make sure that the channel dimension of the pixel values match with the one set in the configuration.""")
lowerCAmelCase = self.embedder(__lowerCAmelCase)
lowerCAmelCase = self.pooler(__lowerCAmelCase)
return embedding
class ResNetShortCut(nn.Module):
'''simple docstring'''
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 2):
"""simple docstring"""
super().__init__()
        lowerCAmelCase = nn.Conv2d(__lowerCAmelCase , __lowerCAmelCase , kernel_size=1 , stride=__lowerCAmelCase , bias=__lowerCAmelCase)
        lowerCAmelCase = nn.BatchNorm2d(__lowerCAmelCase)
def a_ ( self , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = self.convolution(__lowerCAmelCase)
lowerCAmelCase = self.normalization(__lowerCAmelCase)
return hidden_state
class ResNetBasicLayer(nn.Module):
'''simple docstring'''
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 1 , __lowerCAmelCase = "relu"):
"""simple docstring"""
super().__init__()
lowerCAmelCase = in_channels != out_channels or stride != 1
lowerCAmelCase = (
ResNetShortCut(__lowerCAmelCase , __lowerCAmelCase , stride=__lowerCAmelCase) if should_apply_shortcut else nn.Identity()
)
lowerCAmelCase = nn.Sequential(
ResNetConvLayer(__lowerCAmelCase , __lowerCAmelCase , stride=__lowerCAmelCase) , ResNetConvLayer(__lowerCAmelCase , __lowerCAmelCase , activation=__lowerCAmelCase) , )
        lowerCAmelCase = ACT2FN[activation]
def a_ ( self , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = hidden_state
lowerCAmelCase = self.layer(__lowerCAmelCase)
lowerCAmelCase = self.shortcut(__lowerCAmelCase)
hidden_state += residual
lowerCAmelCase = self.activation(__lowerCAmelCase)
return hidden_state
class ResNetBottleNeckLayer(nn.Module):
'''simple docstring'''
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 1 , __lowerCAmelCase = "relu" , __lowerCAmelCase = 4):
"""simple docstring"""
super().__init__()
lowerCAmelCase = in_channels != out_channels or stride != 1
lowerCAmelCase = out_channels // reduction
lowerCAmelCase = (
ResNetShortCut(__lowerCAmelCase , __lowerCAmelCase , stride=__lowerCAmelCase) if should_apply_shortcut else nn.Identity()
)
lowerCAmelCase = nn.Sequential(
ResNetConvLayer(__lowerCAmelCase , __lowerCAmelCase , kernel_size=1) , ResNetConvLayer(__lowerCAmelCase , __lowerCAmelCase , stride=__lowerCAmelCase) , ResNetConvLayer(__lowerCAmelCase , __lowerCAmelCase , kernel_size=1 , activation=__lowerCAmelCase) , )
        lowerCAmelCase = ACT2FN[activation]
def a_ ( self , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = hidden_state
lowerCAmelCase = self.layer(__lowerCAmelCase)
lowerCAmelCase = self.shortcut(__lowerCAmelCase)
hidden_state += residual
lowerCAmelCase = self.activation(__lowerCAmelCase)
return hidden_state
class ResNetStage(nn.Module):
'''simple docstring'''
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 2 , __lowerCAmelCase = 2 , ):
"""simple docstring"""
super().__init__()
lowerCAmelCase = ResNetBottleNeckLayer if config.layer_type == """bottleneck""" else ResNetBasicLayer
lowerCAmelCase = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(__lowerCAmelCase , __lowerCAmelCase , stride=__lowerCAmelCase , activation=config.hidden_act) , *[layer(__lowerCAmelCase , __lowerCAmelCase , activation=config.hidden_act) for _ in range(depth - 1)] , )
def a_ ( self , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = input
for layer in self.layers:
lowerCAmelCase = layer(__lowerCAmelCase)
return hidden_state
class ResNetEncoder(nn.Module):
'''simple docstring'''
def __init__( self , __lowerCAmelCase):
"""simple docstring"""
super().__init__()
lowerCAmelCase = nn.ModuleList([])
# based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
self.stages.append(
ResNetStage(
__lowerCAmelCase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ))
lowerCAmelCase = zip(config.hidden_sizes , config.hidden_sizes[1:])
for (in_channels, out_channels), depth in zip(__lowerCAmelCase , config.depths[1:]):
self.stages.append(ResNetStage(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , depth=__lowerCAmelCase))
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase = False , __lowerCAmelCase = True):
"""simple docstring"""
lowerCAmelCase = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
lowerCAmelCase = hidden_states + (hidden_state,)
lowerCAmelCase = stage_module(__lowerCAmelCase)
if output_hidden_states:
lowerCAmelCase = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None)
return BaseModelOutputWithNoAttention(
last_hidden_state=__lowerCAmelCase , hidden_states=__lowerCAmelCase , )
class ResNetPreTrainedModel(PreTrainedModel):
    '''simple docstring'''
    config_class = ResNetConfig
    base_model_prefix = '''resnet'''
    main_input_name = '''pixel_values'''
    supports_gradient_checkpointing = True
def a_ ( self , __lowerCAmelCase):
"""simple docstring"""
        if isinstance(__lowerCAmelCase , nn.Conv2d):
            nn.init.kaiming_normal_(module.weight , mode="""fan_out""" , nonlinearity="""relu""")
        elif isinstance(__lowerCAmelCase , (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(module.weight , 1)
nn.init.constant_(module.bias , 0)
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase=False):
"""simple docstring"""
if isinstance(__lowerCAmelCase , __lowerCAmelCase):
lowerCAmelCase = value
__lowercase = R'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
__lowercase = R'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
'''The bare ResNet model outputting raw features without any specific head on top.''' , lowerCAmelCase__ , )
class ResNetModel(ResNetPreTrainedModel):
'''simple docstring'''
def __init__( self , __lowerCAmelCase):
"""simple docstring"""
super().__init__(__lowerCAmelCase)
lowerCAmelCase = config
lowerCAmelCase = ResNetEmbeddings(__lowerCAmelCase)
lowerCAmelCase = ResNetEncoder(__lowerCAmelCase)
        lowerCAmelCase = nn.AdaptiveAvgPool2d((1, 1))
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__lowerCAmelCase)
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=__lowerCAmelCase , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = None):
"""simple docstring"""
lowerCAmelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict
lowerCAmelCase = self.embedder(__lowerCAmelCase)
lowerCAmelCase = self.encoder(
__lowerCAmelCase , output_hidden_states=__lowerCAmelCase , return_dict=__lowerCAmelCase)
lowerCAmelCase = encoder_outputs[0]
lowerCAmelCase = self.pooler(__lowerCAmelCase)
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=__lowerCAmelCase , pooler_output=__lowerCAmelCase , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
'''
ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
''' , lowerCAmelCase__ , )
class ResNetForImageClassification(ResNetPreTrainedModel):
'''simple docstring'''
def __init__( self , __lowerCAmelCase):
"""simple docstring"""
super().__init__(__lowerCAmelCase)
lowerCAmelCase = config.num_labels
lowerCAmelCase = ResNetModel(__lowerCAmelCase)
# classification head
lowerCAmelCase = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__lowerCAmelCase)
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__lowerCAmelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def a_ ( self , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , ):
"""simple docstring"""
lowerCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict
lowerCAmelCase = self.resnet(__lowerCAmelCase , output_hidden_states=__lowerCAmelCase , return_dict=__lowerCAmelCase)
lowerCAmelCase = outputs.pooler_output if return_dict else outputs[1]
lowerCAmelCase = self.classifier(__lowerCAmelCase)
lowerCAmelCase = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
lowerCAmelCase = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
lowerCAmelCase = """single_label_classification"""
else:
lowerCAmelCase = """multi_label_classification"""
if self.config.problem_type == "regression":
lowerCAmelCase = MSELoss()
if self.num_labels == 1:
lowerCAmelCase = loss_fct(logits.squeeze() , labels.squeeze())
else:
lowerCAmelCase = loss_fct(__lowerCAmelCase , __lowerCAmelCase)
elif self.config.problem_type == "single_label_classification":
lowerCAmelCase = CrossEntropyLoss()
lowerCAmelCase = loss_fct(logits.view(-1 , self.num_labels) , labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
lowerCAmelCase = BCEWithLogitsLoss()
lowerCAmelCase = loss_fct(__lowerCAmelCase , __lowerCAmelCase)
if not return_dict:
lowerCAmelCase = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=__lowerCAmelCase , logits=__lowerCAmelCase , hidden_states=outputs.hidden_states)
@add_start_docstrings(
'''
ResNet backbone, to be used with frameworks like DETR and MaskFormer.
''' , lowerCAmelCase__ , )
class ResNetBackbone(ResNetPreTrainedModel, BackboneMixin):
'''simple docstring'''
def __init__( self , __lowerCAmelCase):
"""simple docstring"""
super().__init__(__lowerCAmelCase)
super()._init_backbone(__lowerCAmelCase)
lowerCAmelCase = [config.embedding_size] + config.hidden_sizes
lowerCAmelCase = ResNetEmbeddings(__lowerCAmelCase)
lowerCAmelCase = ResNetEncoder(__lowerCAmelCase)
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__lowerCAmelCase)
@replace_return_docstrings(output_type=__lowerCAmelCase , config_class=_CONFIG_FOR_DOC)
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = None):
"""simple docstring"""
lowerCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict
lowerCAmelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCAmelCase = self.embedder(__lowerCAmelCase)
lowerCAmelCase = self.encoder(__lowerCAmelCase , output_hidden_states=__lowerCAmelCase , return_dict=__lowerCAmelCase)
lowerCAmelCase = outputs.hidden_states
lowerCAmelCase = ()
for idx, stage in enumerate(self.stage_names):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
lowerCAmelCase = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(
feature_maps=__lowerCAmelCase , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=__lowerCAmelCase , )
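# A hedged, standalone restatement of the residual pattern the layers above
# implement: y = act(F(x) + shortcut(x)), with a 1x1 projection on the
# shortcut only when the shape changes. Illustrative sketch, not modeling code.
class _MinimalResidualBlock(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        self.body = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride, padding=1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(),
            nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(out_channels),
        )
        self.shortcut = (
            nn.Sequential(
                nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(out_channels),
            )
            if stride != 1 or in_channels != out_channels
            else nn.Identity()
        )

    def forward(self, x: Tensor) -> Tensor:
        # Add the (possibly projected) input back in, then apply the activation.
        return nn.functional.relu(self.body(x) + self.shortcut(x))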
| 707 | '''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_mgp_str''': ['''MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MgpstrConfig'''],
'''processing_mgp_str''': ['''MgpstrProcessor'''],
'''tokenization_mgp_str''': ['''MgpstrTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_mgp_str'''] = [
'''MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MgpstrModel''',
'''MgpstrPreTrainedModel''',
'''MgpstrForSceneTextRecognition''',
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
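# Illustrative note, assuming standard `_LazyModule` semantics: with the
# mapping above, the heavy submodules are only imported on first attribute
# access, e.g.
#
#   import transformers.models.mgp_str as mgp_str
#   config_cls = mgp_str.MgpstrConfig  # this access triggers the real import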
| 605 | 0 |
from sklearn.metrics import matthews_corrcoef
import datasets
lowerCamelCase : Optional[Any] = "\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n"
lowerCamelCase : Union[str, Any] = "\nArgs:\n predictions (list of int): Predicted labels, as returned by a model.\n references (list of int): Ground truth labels.\n sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n Example 1, a basic example with only predictions and references as inputs:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3])\n >>> print(round(results['matthews_correlation'], 2))\n 0.54\n\n Example 2, the same example as above, but also including sample weights:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 3, 1, 1, 1, 2])\n >>> print(round(results['matthews_correlation'], 2))\n 0.1\n\n Example 3, the same example as above, but with sample weights that cause a negative correlation:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 1, 0, 0, 0, 1])\n >>> print(round(results['matthews_correlation'], 2))\n -0.25\n"
lowerCamelCase : Optional[int] = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class MatthewsCorrelation(datasets.Metric):
'''simple docstring'''
def a__ ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('int32' ),
'references': datasets.Value('int32' ),
} ) , reference_urls=[
'https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html'
] , )
def a__ ( self : Any , A_ : Optional[Any] , A_ : List[Any] , A_ : List[Any]=None ) -> Tuple:
"""simple docstring"""
return {
"matthews_correlation": float(matthews_corrcoef(A_ , A_ , sample_weight=A_ ) ),
}
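# A worked check of the wrapped backend above, mirroring Example 1 from the
# docstring (sklearn's `matthews_corrcoef` is already imported at the top):
if __name__ == "__main__":
    print(round(matthews_corrcoef([1, 3, 2, 0, 3, 2], [1, 2, 2, 0, 3, 3]), 2))  # 0.54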
| 70 |
from collections.abc import Sequence
def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    """Return the maximum sum over all (possibly non-contiguous) subsequences.

    >>> max_subsequence_sum([1, 2, 3, 4, -2])
    10
    >>> max_subsequence_sum([-2, -3, -1, -4, -6])
    -1
    """
    if nums is None or not nums:
        raise ValueError('Input sequence should not be empty')
    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        # Either keep the previous best, extend it with num, or restart at num.
        ans = max(ans, ans + num, num)
    return ans
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
    n = int(input('Enter number of elements : ').strip())
    array = list(map(int, input('\nEnter the numbers : ').strip().split()))[:n]
print(max_subsequence_sum(array)) | 106 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester:
def __init__( self : Optional[int] , a : List[str] , a : str=2 , a : Dict=3_2 , a : Tuple=1_6 , a : str=3 , a : Union[str, Any]=True , a : Union[str, Any]=True , a : List[str]=3_2 , a : Union[str, Any]=4 , a : Optional[int]=[0, 1, 2, 3] , a : Dict=4 , a : Optional[Any]=3_7 , a : Tuple="gelu" , a : Optional[Any]=0.1 , a : List[str]=0.1 , a : Tuple=0.0_2 , a : Any=3 , a : Optional[int]=[1, 3_8_4, 2_4, 2_4] , a : int=True , a : Any=None , ):
"""simple docstring"""
__snake_case : str =parent
__snake_case : int =batch_size
__snake_case : Optional[int] =image_size
__snake_case : Optional[Any] =patch_size
__snake_case : Optional[int] =num_channels
__snake_case : List[str] =is_training
__snake_case : Optional[Any] =use_labels
__snake_case : Optional[int] =hidden_size
__snake_case : str =num_hidden_layers
__snake_case : Optional[Any] =backbone_out_indices
__snake_case : Optional[int] =num_attention_heads
__snake_case : List[Any] =intermediate_size
__snake_case : Union[str, Any] =hidden_act
__snake_case : List[Any] =hidden_dropout_prob
__snake_case : Any =attention_probs_dropout_prob
__snake_case : Any =initializer_range
__snake_case : Union[str, Any] =num_labels
__snake_case : int =backbone_featmap_shape
__snake_case : Optional[int] =scope
__snake_case : Union[str, Any] =is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
__snake_case : str =(image_size // patch_size) ** 2
__snake_case : List[Any] =num_patches + 1
def _UpperCamelCase ( self : Dict ):
"""simple docstring"""
__snake_case : Optional[int] =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case : Optional[Any] =None
if self.use_labels:
__snake_case : str =ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__snake_case : List[Any] =self.get_config()
return config, pixel_values, labels
def _UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
__snake_case : Tuple ={
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
'''hidden_sizes''': [9_6, 1_9_2, 3_8_4, 7_6_8],
'''num_groups''': 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=a , backbone_featmap_shape=self.backbone_featmap_shape , )
def _UpperCamelCase ( self : List[Any] , a : Dict , a : Optional[Any] , a : int ):
"""simple docstring"""
__snake_case : Tuple =DPTModel(config=a )
model.to(a )
model.eval()
__snake_case : List[str] =model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCamelCase ( self : Optional[Any] , a : Optional[int] , a : List[Any] , a : List[str] ):
"""simple docstring"""
__snake_case : Dict =self.num_labels
__snake_case : Optional[Any] =DPTForDepthEstimation(a )
model.to(a )
model.eval()
__snake_case : Any =model(a )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def _UpperCamelCase ( self : int , a : str , a : Union[str, Any] , a : Optional[int] ):
"""simple docstring"""
__snake_case : str =self.num_labels
__snake_case : Dict =DPTForSemanticSegmentation(a )
model.to(a )
model.eval()
__snake_case : Optional[Any] =model(a , labels=a )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def _UpperCamelCase ( self : Tuple ):
"""simple docstring"""
__snake_case : int =self.prepare_config_and_inputs()
__snake_case : List[str] =config_and_inputs
__snake_case : Tuple ={'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            '''depth-estimation''': DPTForDepthEstimation,
            '''feature-extraction''': DPTModel,
            '''image-segmentation''': DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
def _UpperCamelCase ( self : int ):
"""simple docstring"""
__snake_case : Any =DPTModelTester(self )
__snake_case : int =ConfigTester(self , config_class=a , has_text_modality=a , hidden_size=3_7 )
def _UpperCamelCase ( self : int ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''DPT does not use inputs_embeds''' )
def _UpperCamelCase ( self : Tuple ):
"""simple docstring"""
pass
def _UpperCamelCase ( self : Any ):
"""simple docstring"""
__snake_case : Union[str, Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : List[Any] =model_class(a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__snake_case : List[str] =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a , nn.Linear ) )
def _UpperCamelCase ( self : int ):
"""simple docstring"""
__snake_case : Dict =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : Union[str, Any] =model_class(a )
__snake_case : Optional[Any] =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case : List[Any] =[*signature.parameters.keys()]
__snake_case : Dict =['''pixel_values''']
self.assertListEqual(arg_names[:1] , a )
def _UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
__snake_case : List[str] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
__snake_case : Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*a )
def _UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
__snake_case : List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*a )
def _UpperCamelCase ( self : Dict ):
"""simple docstring"""
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
__snake_case : Union[str, Any] =self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : List[Any] =True
if model_class in get_values(a ):
continue
__snake_case : Tuple =model_class(a )
model.to(a )
model.train()
__snake_case : List[str] =self._prepare_for_class(a , a , return_labels=a )
__snake_case : int =model(**a ).loss
loss.backward()
def _UpperCamelCase ( self : str ):
"""simple docstring"""
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
__snake_case : Tuple =self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : int =False
__snake_case : str =True
if model_class in get_values(a ) or not model_class.supports_gradient_checkpointing:
continue
__snake_case : List[Any] =model_class(a )
model.to(a )
model.gradient_checkpointing_enable()
model.train()
__snake_case : List[Any] =self._prepare_for_class(a , a , return_labels=a )
__snake_case : Any =model(**a ).loss
loss.backward()
def _UpperCamelCase ( self : Tuple ):
"""simple docstring"""
__snake_case : Optional[Any] =self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : List[str] =_config_zero_init(a )
for model_class in self.all_model_classes:
__snake_case : List[str] =model_class(config=a )
# Skip the check for the backbone
__snake_case : List[Any] =[]
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
__snake_case : Any =[f'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def _UpperCamelCase ( self : Dict ):
"""simple docstring"""
pass
@slow
def _UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
__snake_case : Dict =DPTModel.from_pretrained(a )
self.assertIsNotNone(a )
def _UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
__snake_case : int =self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : Tuple ='''add'''
with self.assertRaises(a ):
__snake_case : List[Any] =DPTForDepthEstimation(a )
def __lowercase ( ) -> Any:
__snake_case : Any =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
@slow
class DPTModelIntegrationTest(unittest.TestCase):
def _UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case : Tuple =DPTImageProcessor.from_pretrained('''Intel/dpt-hybrid-midas''' )
__snake_case : Dict =DPTForDepthEstimation.from_pretrained('''Intel/dpt-hybrid-midas''' ).to(a )
__snake_case : List[Any] =prepare_img()
__snake_case : Optional[Any] =image_processor(images=a , return_tensors='''pt''' ).to(a )
# forward pass
with torch.no_grad():
__snake_case : Union[str, Any] =model(**a )
__snake_case : Union[str, Any] =outputs.predicted_depth
# verify the predicted depth
__snake_case : List[Any] =torch.Size((1, 3_8_4, 3_8_4) )
self.assertEqual(predicted_depth.shape , a )
__snake_case : Union[str, Any] =torch.tensor(
[[[5.6_4_3_7, 5.6_1_4_6, 5.6_5_1_1], [5.4_3_7_1, 5.5_6_4_9, 5.5_9_5_8], [5.5_2_1_5, 5.5_1_8_4, 5.5_2_9_3]]] ).to(a )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 1_0_0 , a , atol=1e-4 ) )
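# Hedged quick-start sketch (not part of the test suite) for the inference
# path the integration test above exercises, via the high-level `pipeline`
# API; the model id and image path are taken from the tests, the rest is
# illustrative.
def _depth_estimation_demo():
    from transformers import pipeline

    depth_estimator = pipeline("depth-estimation", model="Intel/dpt-hybrid-midas")
    result = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png")
    # `result["predicted_depth"]` is the raw tensor; `result["depth"]` is a PIL image.
    print(result["predicted_depth"].shape)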
| 721 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
def __init__( self : List[Any] , a : int , a : Dict=7 , a : int=3 , a : int=1_8 , a : Dict=3_0 , a : Dict=4_0_0 , a : Optional[Any]=True , a : Dict=None , a : int=True , a : Dict=False , a : int=True , a : str=True , a : List[str]=[0.5, 0.5, 0.5] , a : Optional[Any]=[0.5, 0.5, 0.5] , ):
"""simple docstring"""
__snake_case : List[Any] =parent
__snake_case : List[Any] =batch_size
__snake_case : str =num_channels
__snake_case : Dict =image_size
__snake_case : str =min_resolution
__snake_case : Tuple =max_resolution
__snake_case : str =do_resize
__snake_case : Any =size if size is not None else {'''height''': 1_8, '''width''': 2_0}
__snake_case : List[Any] =do_thumbnail
__snake_case : Tuple =do_align_axis
__snake_case : Any =do_pad
__snake_case : Dict =do_normalize
__snake_case : List[Any] =image_mean
__snake_case : Any =image_std
def _UpperCamelCase ( self : str ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None
def _UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
__snake_case : Optional[Any] =DonutImageProcessingTester(self )
@property
def _UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def _UpperCamelCase ( self : List[str] ):
"""simple docstring"""
__snake_case : Optional[Any] =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a , '''do_resize''' ) )
self.assertTrue(hasattr(a , '''size''' ) )
self.assertTrue(hasattr(a , '''do_thumbnail''' ) )
self.assertTrue(hasattr(a , '''do_align_long_axis''' ) )
self.assertTrue(hasattr(a , '''do_pad''' ) )
self.assertTrue(hasattr(a , '''do_normalize''' ) )
self.assertTrue(hasattr(a , '''image_mean''' ) )
self.assertTrue(hasattr(a , '''image_std''' ) )
def _UpperCamelCase ( self : Tuple ):
"""simple docstring"""
__snake_case : Dict =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 1_8, '''width''': 2_0} )
__snake_case : List[Any] =self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 )
self.assertEqual(image_processor.size , {'''height''': 4_2, '''width''': 4_2} )
# Previous config had dimensions in (width, height) order
__snake_case : Tuple =self.image_processing_class.from_dict(self.image_processor_dict , size=(4_2, 8_4) )
self.assertEqual(image_processor.size , {'''height''': 8_4, '''width''': 4_2} )
def _UpperCamelCase ( self : Any ):
"""simple docstring"""
pass
@is_flaky()
def _UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case : List[Any] =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__snake_case : List[Any] =prepare_image_inputs(self.image_processor_tester , equal_resolution=a )
for image in image_inputs:
self.assertIsInstance(a , Image.Image )
# Test not batched input
__snake_case : str =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
__snake_case : Optional[int] =image_processing(a , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
@is_flaky()
def _UpperCamelCase ( self : Tuple ):
"""simple docstring"""
__snake_case : Tuple =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__snake_case : Optional[int] =prepare_image_inputs(self.image_processor_tester , equal_resolution=a , numpify=a )
for image in image_inputs:
self.assertIsInstance(a , np.ndarray )
# Test not batched input
__snake_case : List[Any] =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
__snake_case : int =image_processing(a , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
@is_flaky()
def _UpperCamelCase ( self : List[str] ):
"""simple docstring"""
__snake_case : Dict =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__snake_case : Dict =prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a )
for image in image_inputs:
self.assertIsInstance(a , torch.Tensor )
# Test not batched input
__snake_case : Dict =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
__snake_case : Optional[int] =image_processing(a , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
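# Hedged sketch (assumption: the constructor normalizes sizes the same way as
# `from_dict`, which the test above checks): a tuple/list size is read in
# legacy (width, height) order and converted to the canonical dict form.
def _donut_size_demo():
    image_processor = DonutImageProcessor(size=(42, 84))
    print(image_processor.size)  # expected: {'height': 84, 'width': 42}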
| 497 | 0 |
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image(image_size, device):
    img_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert('RGB')
    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            transforms.Normalize((0.4814_5466, 0.457_8275, 0.4082_1073), (0.2686_2954, 0.2613_0258, 0.2757_7711)),
        ]
    )
    image = transform(raw_image).unsqueeze(0).to(device)
    return image
def rename_key(key):
    if "visual_encoder" in key:
        key = re.sub('visual_encoder*', 'vision_model.encoder', key)
    if "blocks" in key:
        key = re.sub(r'blocks', 'layers', key)
    if "attn" in key:
        key = re.sub(r'attn', 'self_attn', key)
    if "norm1" in key:
        key = re.sub(r'norm1', 'layer_norm1', key)
    if "norm2" in key:
        key = re.sub(r'norm2', 'layer_norm2', key)
    if "encoder.norm" in key:
        key = re.sub(r'encoder.norm', 'post_layernorm', key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r'encoder.patch_embed.proj', 'embeddings.patch_embedding', key)
    if "encoder.pos_embed" in key:
        key = re.sub(r'encoder.pos_embed', 'embeddings.position_embedding', key)
    if "encoder.cls_token" in key:
        key = re.sub(r'encoder.cls_token', 'embeddings.class_embedding', key)
    if "self_attn" in key:
        key = re.sub(r'self_attn.proj', 'self_attn.projection', key)
    return key
@torch.no_grad()
def a__ ( snake_case , snake_case=None ):
"""simple docstring"""
if config_path is not None:
__SCREAMING_SNAKE_CASE : List[str] = BlipConfig.from_pretrained(snake_case )
else:
__SCREAMING_SNAKE_CASE : Tuple = BlipConfig(projection_dim=512 , text_config={} , vision_config={} )
__SCREAMING_SNAKE_CASE : Optional[Any] = BlipForConditionalGeneration(snake_case ).eval()
__SCREAMING_SNAKE_CASE : Tuple = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth'''
__SCREAMING_SNAKE_CASE : Optional[Any] = blip_decoder(pretrained=snake_case , image_size=384 , vit='''base''' )
__SCREAMING_SNAKE_CASE : Union[str, Any] = pt_model.eval()
__SCREAMING_SNAKE_CASE : Tuple = pt_model.state_dict()
for key in modified_state_dict.copy():
__SCREAMING_SNAKE_CASE : List[str] = modified_state_dict.pop(snake_case )
__SCREAMING_SNAKE_CASE : int = rename_key(snake_case )
__SCREAMING_SNAKE_CASE : Optional[Any] = value
hf_model.load_state_dict(snake_case )
__SCREAMING_SNAKE_CASE : int = 384
__SCREAMING_SNAKE_CASE : List[Any] = load_demo_image(image_size=snake_case , device='''cpu''' )
__SCREAMING_SNAKE_CASE : Optional[Any] = BertTokenizer.from_pretrained('''bert-base-uncased''' )
__SCREAMING_SNAKE_CASE : Optional[int] = tokenizer(['''a picture of'''] ).input_ids
__SCREAMING_SNAKE_CASE : Dict = hf_model.generate(snake_case , snake_case )
assert out[0].tolist() == [30_522, 1_037, 3_861, 1_997, 1_037, 2_450, 3_564, 2_006, 1_996, 3_509, 2_007, 2_014, 3_899, 102]
__SCREAMING_SNAKE_CASE : Optional[Any] = hf_model.generate(snake_case )
assert out[0].tolist() == [30_522, 1_037, 2_450, 3_564, 2_006, 1_996, 3_509, 2_007, 2_014, 3_899, 102]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(snake_case )
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
__SCREAMING_SNAKE_CASE : Dict = (
'''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'''
)
__SCREAMING_SNAKE_CASE : Dict = blip_vqa(pretrained=snake_case , image_size=snake_case , vit='''base''' )
vqa_model.eval()
__SCREAMING_SNAKE_CASE : Optional[int] = vqa_model.state_dict()
for key in modified_state_dict.copy():
__SCREAMING_SNAKE_CASE : List[str] = modified_state_dict.pop(snake_case )
__SCREAMING_SNAKE_CASE : str = rename_key(snake_case )
__SCREAMING_SNAKE_CASE : Optional[int] = value
__SCREAMING_SNAKE_CASE : str = BlipForQuestionAnswering(snake_case )
hf_vqa_model.load_state_dict(snake_case )
__SCREAMING_SNAKE_CASE : Dict = ['''How many dogs are in this image?''']
__SCREAMING_SNAKE_CASE : Any = tokenizer(snake_case , return_tensors='''pt''' ).input_ids
__SCREAMING_SNAKE_CASE : Tuple = hf_vqa_model.generate(snake_case , snake_case )
print(tokenizer.decode(answer[0] ) )
assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '''_vqa''' )
__SCREAMING_SNAKE_CASE : Optional[Any] = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'''
__SCREAMING_SNAKE_CASE : str = blip_itm(pretrained=snake_case , image_size=snake_case , vit='''base''' )
itm_model.eval()
__SCREAMING_SNAKE_CASE : List[str] = itm_model.state_dict()
for key in modified_state_dict.copy():
__SCREAMING_SNAKE_CASE : List[str] = modified_state_dict.pop(snake_case )
__SCREAMING_SNAKE_CASE : Tuple = rename_key(snake_case )
__SCREAMING_SNAKE_CASE : Tuple = value
__SCREAMING_SNAKE_CASE : int = BlipForImageTextRetrieval(snake_case )
__SCREAMING_SNAKE_CASE : List[str] = ['''A picture of a woman with a dog sitting in a beach''']
__SCREAMING_SNAKE_CASE : Any = tokenizer(
snake_case , return_tensors='''pt''' , padding='''max_length''' , truncation=snake_case , max_length=35 , ).input_ids
hf_itm_model.load_state_dict(snake_case )
hf_itm_model.eval()
__SCREAMING_SNAKE_CASE : List[Any] = hf_itm_model(snake_case , snake_case , use_itm_head=snake_case )
__SCREAMING_SNAKE_CASE : int = hf_itm_model(snake_case , snake_case , use_itm_head=snake_case )
assert out[0].item() == 0.2110_6874_9427_7954
assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.4_5698_8453_8650_5127
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + '''_itm''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()
    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
| 74 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self : str ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = [[1, 2, 4], [1, 2, 3, 4]]
__SCREAMING_SNAKE_CASE : Tuple = DisjunctiveConstraint(_A )
self.assertTrue(isinstance(dc.token_ids , _A ) )
with self.assertRaises(_A ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(_A ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def UpperCAmelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(_A ):
DisjunctiveConstraint(_A ) # fails here
def UpperCAmelCase__ ( self : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = [[1, 2, 3], [1, 2, 4]]
__SCREAMING_SNAKE_CASE : Optional[Any] = DisjunctiveConstraint(_A )
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[Any] = dc.update(1 )
__SCREAMING_SNAKE_CASE : int = stepped is True and completed is False and reset is False
self.assertTrue(_A )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Dict = dc.update(2 )
__SCREAMING_SNAKE_CASE : Optional[Any] = stepped is True and completed is False and reset is False
self.assertTrue(_A )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[str] = dc.update(3 )
__SCREAMING_SNAKE_CASE : Union[str, Any] = stepped is True and completed is True and reset is False
self.assertTrue(_A )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def UpperCAmelCase__ ( self : str ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
__SCREAMING_SNAKE_CASE : str = DisjunctiveConstraint(_A )
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Dict = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[Any] = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : str = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Tuple = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : int = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : int = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[int] = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
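# Minimal standalone sketch of the state machine exercised above: feeding
# tokens 1, 2, 4 completes the second branch of [[1, 2, 3], [1, 2, 4]].
def _disjunctive_demo():
    dc = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]])
    for token in (1, 2, 4):
        stepped, completed, reset = dc.update(token)
    print(dc.completed, dc.current_seq)  # True [1, 2, 4]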
| 74 | 1 |
'''simple docstring'''
from jiwer import compute_measures
import datasets
_CITATION = '\\n@inproceedings{inproceedings,\n    author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n    year = {2004},\n    month = {01},\n    pages = {},\n    title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
_DESCRIPTION = '\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n'
_KWARGS_DESCRIPTION = '\nCompute WER score of transcribed segments against references.\n\nArgs:\n    references: List of references for each speech input.\n    predictions: List of transcriptions to score.\n    concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n    (float): the word error rate\n\nExamples:\n\n    >>> predictions = ["this is the prediction", "there is an other sample"]\n    >>> references = ["this is the reference", "there is another one"]\n    >>> wer = datasets.load_metric("wer")\n    >>> wer_score = wer.compute(predictions=predictions, references=references)\n    >>> print(wer_score)\n    0.5\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase_ ( datasets.Metric ):
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/jitsi/jiwer/"] , reference_urls=[
"https://en.wikipedia.org/wiki/Word_error_rate",
] , )
    def _compute( self , predictions=None , references=None , concatenate_texts=False ):
        if concatenate_texts:
            return compute_measures(references , predictions )["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions , references ):
                measures = compute_measures(reference , prediction )
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
| 717 |
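# A quick worked example of the formula WER = (S + D + I) / (S + D + C) from
# the description above, computed by hand for one reference/hypothesis pair
# (the word strings are illustrative):
reference = "this is the reference".split()        # N = 4 words
hypothesis = "this is an other reference".split()
# Alignment: "this" and "is" are hits, "an" substitutes "the",
# "other" is an insertion, "reference" is a hit -> S=1, D=0, I=1, C=3
S, D, I, C = 1, 0, 1, 3
print((S + D + I) / (S + D + C))  # 0.5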
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_owlvit': [
'OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'OwlViTConfig',
'OwlViTOnnxConfig',
'OwlViTTextConfig',
'OwlViTVisionConfig',
],
'processing_owlvit': ['OwlViTProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_owlvit'] = ['OwlViTFeatureExtractor']
    _import_structure['image_processing_owlvit'] = ['OwlViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_owlvit'] = [
'OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OwlViTModel',
'OwlViTPreTrainedModel',
'OwlViTTextModel',
'OwlViTVisionModel',
'OwlViTForObjectDetection',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 223 | 0 |
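# The module above defers all heavy imports until an attribute is first
# accessed. A simplified sketch of the idea behind such a lazy module (the
# real _LazyModule also handles submodules, __dir__ and copy semantics):
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        # only called for attributes not found the normal way
        if attr not in self.__dict__.get("_attr_to_module", {}):
            raise AttributeError(attr)
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so __getattr__ fires only once per name
        return value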
'''simple docstring'''
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
KT = TypeVar("KT")
VT = TypeVar("VT")
class Node( Generic[KT, VT] ):
    def __init__( self , key = "root" , value = None ) -> None:
        '''simple docstring'''
        self.key = key
        self.value = value
        self.forward = []
    def __repr__( self ) -> str:
        '''simple docstring'''
        return F'Node({self.key}: {self.value})'
    @property
    def level( self ) -> int:
        '''simple docstring'''
        return len(self.forward )
class SkipList( Generic[KT, VT] ):
    def __init__( self , p = 0.5 , max_level = 16 ) -> None:
        '''simple docstring'''
        self.head = Node[KT, VT]()
        self.level = 0
        self.p = p
        self.max_level = max_level
    def __str__( self ) -> str:
        '''simple docstring'''
        items = list(self )
        if len(items ) == 0:
            return F'SkipList(level={self.level})'
        label_size = max((len(str(item ) ) for item in items) , default=4 )
        label_size = max(label_size , 4 ) + 4
        node = self.head
        lines = []
        forwards = node.forward.copy()
        lines.append(F'[{node.key}]'.ljust(label_size , "-" ) + "* " * len(forwards ) )
        lines.append(" " * label_size + "| " * len(forwards ) )
        while len(node.forward ) != 0:
            node = node.forward[0]
            lines.append(
                F'[{node.key}]'.ljust(label_size , "-" )
                + " ".join(str(n.key ) if n.key == node.key else "|" for n in forwards ) )
            lines.append(" " * label_size + "| " * len(forwards ) )
            forwards = node.forward
        lines.append("None".ljust(label_size ) + "* " * len(forwards ) )
        return F'SkipList(level={self.level})\n' + "\n".join(lines )
    def __iter__( self ):
        '''simple docstring'''
        node = self.head
        while len(node.forward ) != 0:
            yield node.forward[0].key
            node = node.forward[0]
    def random_level( self ) -> int:
        '''simple docstring'''
        level = 1
        while random() < self.p and level < self.max_level:
            level += 1
        return level
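    # random_level() samples a geometric distribution truncated at max_level:
    # P(level = k) = p**(k - 1) * (1 - p), so the expected node level is
    # 1 / (1 - p) (2 for the default p = 0.5), which keeps search O(log n)
    # in expectation.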
    def _locate_node( self , key ) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]:
        '''simple docstring'''
        update_vector = []
        node = self.head
        for i in reversed(range(self.level ) ):
            # i < node.level - When node level is lesser than `i` decrement `i`.
            # node.forward[i].key < key - Jumping to node with key value higher
            #                             or equal to searched key would result
            #                             in skipping searched key.
            while i < node.level and node.forward[i].key < key:
                node = node.forward[i]
            # Each leftmost node (relative to searched node) will potentially have to
            # be updated.
            update_vector.append(node )
        update_vector.reverse()  # Note that we were inserting values in reverse order.
        # len(node.forward) != 0 - If current node doesn't contain any further
        #                          references then searched key is not present.
        # node.forward[0].key == key - Next node key should be equal to search key
        #                              if key is present.
        if len(node.forward ) != 0 and node.forward[0].key == key:
            return node.forward[0], update_vector
        else:
            return None, update_vector
    def delete( self , key ) -> None:
        '''simple docstring'''
        node , update_vector = self._locate_node(key )
        if node is not None:
            for i, update_node in enumerate(update_vector ):
                # Remove or replace all references to removed node.
                if update_node.level > i and update_node.forward[i].key == key:
                    if node.level > i:
                        update_node.forward[i] = node.forward[i]
                    else:
                        update_node.forward = update_node.forward[:i]
    def insert( self , key , value ) -> None:
        '''simple docstring'''
        node , update_vector = self._locate_node(key )
        if node is not None:
            node.value = value
        else:
            level = self.random_level()
            if level > self.level:
                # After level increase we have to add additional nodes to head.
                for _ in range(self.level - 1 , level ):
                    update_vector.append(self.head )
                self.level = level
            new_node = Node(key , value )
            for i, update_node in enumerate(update_vector[:level] ):
                # Change references to pass through new node.
                if update_node.level > i:
                    new_node.forward.append(update_node.forward[i] )
                if update_node.level < i + 1:
                    update_node.forward.append(new_node )
                else:
                    update_node.forward[i] = new_node
    def find( self , key ) -> VT | None:
        '''simple docstring'''
        node , _ = self._locate_node(key )
        if node is not None:
            return node.value
        return None
def test_insert():
'''simple docstring'''
snake_case_ = SkipList()
skip_list.insert("Key1" , 3 )
skip_list.insert("Key2" , 1_2 )
skip_list.insert("Key3" , 4_1 )
skip_list.insert("Key4" , -1_9 )
    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value
    assert len(all_values ) == 4
assert all_values["Key1"] == 3
assert all_values["Key2"] == 1_2
assert all_values["Key3"] == 4_1
assert all_values["Key4"] == -1_9
def test_insert_overrides_existing_value():
'''simple docstring'''
snake_case_ = SkipList()
skip_list.insert("Key1" , 1_0 )
skip_list.insert("Key1" , 1_2 )
skip_list.insert("Key5" , 7 )
skip_list.insert("Key7" , 1_0 )
skip_list.insert("Key10" , 5 )
skip_list.insert("Key7" , 7 )
skip_list.insert("Key5" , 5 )
skip_list.insert("Key10" , 1_0 )
    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value
    if len(all_values ) != 4:
        print()
    assert len(all_values ) == 4
assert all_values["Key1"] == 1_2
assert all_values["Key7"] == 7
assert all_values["Key5"] == 5
assert all_values["Key10"] == 1_0
def test_searching_empty_list_returns_none():
'''simple docstring'''
snake_case_ = SkipList()
assert skip_list.find("Some key" ) is None
def test_search():
'''simple docstring'''
snake_case_ = SkipList()
skip_list.insert("Key2" , 2_0 )
assert skip_list.find("Key2" ) == 2_0
skip_list.insert("Some Key" , 1_0 )
skip_list.insert("Key2" , 8 )
skip_list.insert("V" , 1_3 )
assert skip_list.find("Y" ) is None
assert skip_list.find("Key2" ) == 8
assert skip_list.find("Some Key" ) == 1_0
assert skip_list.find("V" ) == 1_3
def test_deleting_item_from_empty_list_do_nothing():
'''simple docstring'''
snake_case_ = SkipList()
skip_list.delete("Some key" )
assert len(skip_list.head.forward ) == 0
def test_deleted_items_are_not_founded_by_find_method():
'''simple docstring'''
snake_case_ = SkipList()
skip_list.insert("Key1" , 1_2 )
skip_list.insert("V" , 1_3 )
skip_list.insert("X" , 1_4 )
skip_list.insert("Key2" , 1_5 )
skip_list.delete("V" )
skip_list.delete("Key2" )
assert skip_list.find("V" ) is None
assert skip_list.find("Key2" ) is None
def test_delete_removes_only_given_key():
'''simple docstring'''
snake_case_ = SkipList()
skip_list.insert("Key1" , 1_2 )
skip_list.insert("V" , 1_3 )
skip_list.insert("X" , 1_4 )
skip_list.insert("Key2" , 1_5 )
skip_list.delete("V" )
assert skip_list.find("V" ) is None
assert skip_list.find("X" ) == 1_4
assert skip_list.find("Key1" ) == 1_2
assert skip_list.find("Key2" ) == 1_5
skip_list.delete("X" )
assert skip_list.find("V" ) is None
assert skip_list.find("X" ) is None
assert skip_list.find("Key1" ) == 1_2
assert skip_list.find("Key2" ) == 1_5
skip_list.delete("Key1" )
assert skip_list.find("V" ) is None
assert skip_list.find("X" ) is None
assert skip_list.find("Key1" ) is None
assert skip_list.find("Key2" ) == 1_5
skip_list.delete("Key2" )
assert skip_list.find("V" ) is None
assert skip_list.find("X" ) is None
assert skip_list.find("Key1" ) is None
assert skip_list.find("Key2" ) is None
def test_delete_doesnt_leave_dead_nodes():
'''simple docstring'''
snake_case_ = SkipList()
skip_list.insert("Key1" , 1_2 )
skip_list.insert("V" , 1_3 )
skip_list.insert("X" , 1_4_2 )
skip_list.insert("Key2" , 1_5 )
skip_list.delete("X" )
    def traverse_keys(snake_case ):
        yield snake_case.key
        for forward_node in snake_case.forward:
            yield from traverse_keys(forward_node )
assert len(set(traverse_keys(skip_list.head ) ) ) == 4
def test_iter_always_yields_sorted_values():
'''simple docstring'''
def is_sorted(snake_case : Any ):
        return all(next_item >= item for item, next_item in zip(snake_case , snake_case[1:] ) )
snake_case_ = SkipList()
for i in range(1_0 ):
        skip_list.insert(i , i )
    assert is_sorted(list(skip_list ) )
skip_list.delete(5 )
skip_list.delete(8 )
skip_list.delete(2 )
    assert is_sorted(list(skip_list ) )
skip_list.insert(-1_2 , -1_2 )
skip_list.insert(7_7 , 7_7 )
    assert is_sorted(list(skip_list ) )
def pytests():
'''simple docstring'''
for _ in range(1_0_0 ):
# Repeat test 100 times due to the probabilistic nature of skip list
# random values == random bugs
test_insert()
test_insert_overrides_existing_value()
test_searching_empty_list_returns_none()
test_search()
test_deleting_item_from_empty_list_do_nothing()
test_deleted_items_are_not_founded_by_find_method()
test_delete_removes_only_given_key()
test_delete_doesnt_leave_dead_nodes()
test_iter_always_yields_sorted_values()
def main():
'''simple docstring'''
snake_case_ = SkipList()
skip_list.insert(2 , "2" )
skip_list.insert(4 , "4" )
skip_list.insert(6 , "4" )
skip_list.insert(4 , "5" )
skip_list.insert(8 , "4" )
skip_list.insert(9 , "4" )
skip_list.delete(4 )
    print(skip_list )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 400 |
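# Quick usage sketch of the SkipList above; keys come back in sorted order
# because iteration follows the level-0 links:
sl = SkipList()
for key, value in [("b", 2), ("a", 1), ("c", 3)]:
    sl.insert(key, value)
assert sl.find("a") == 1
assert list(sl) == ["a", "b", "c"]
sl.delete("b")
assert sl.find("b") is None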
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large-finetuned-conll02-dutch": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll02-spanish": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-english": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-german": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"xlm-roberta-base": 512,
"xlm-roberta-large": 512,
"xlm-roberta-large-finetuned-conll02-dutch": 512,
"xlm-roberta-large-finetuned-conll02-spanish": 512,
"xlm-roberta-large-finetuned-conll03-english": 512,
"xlm-roberta-large-finetuned-conll03-german": 512,
}
class _snake_case ( PreTrainedTokenizer ):
lowerCAmelCase_ : Union[str, Any] = VOCAB_FILES_NAMES
lowerCAmelCase_ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ : List[str] = ["input_ids", "attention_mask"]
def __init__( self , a__ , a__="<s>" , a__="</s>" , a__="</s>" , a__="<s>" , a__="<unk>" , a__="<pad>" , a__="<mask>" , a__ = None , **a__ , ) -> None:
'''simple docstring'''
snake_case_ = AddedToken(a__ , lstrip=a__ , rstrip=a__ ) if isinstance(a__ , a__ ) else mask_token
snake_case_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=a__ , eos_token=a__ , unk_token=a__ , sep_token=a__ , cls_token=a__ , pad_token=a__ , mask_token=a__ , sp_model_kwargs=self.sp_model_kwargs , **a__ , )
snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(a__ ) )
snake_case_ = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
snake_case_ = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
snake_case_ = 1
snake_case_ = len(self.sp_model ) + self.fairseq_offset
snake_case_ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ) -> Any:
'''simple docstring'''
snake_case_ = self.__dict__.copy()
snake_case_ = None
snake_case_ = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , a__ ) -> Any:
'''simple docstring'''
snake_case_ = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
snake_case_ = {}
snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def lowerCAmelCase__ ( self , a__ , a__ = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
snake_case_ = [self.cls_token_id]
snake_case_ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCAmelCase__ ( self , a__ , a__ = None , a__ = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a__ , token_ids_a=a__ , already_has_special_tokens=a__ )
if token_ids_a is None:
return [1] + ([0] * len(a__ )) + [1]
return [1] + ([0] * len(a__ )) + [1, 1] + ([0] * len(a__ )) + [1]
def lowerCAmelCase__ ( self , a__ , a__ = None ) -> List[int]:
'''simple docstring'''
snake_case_ = [self.sep_token_id]
snake_case_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowerCAmelCase__ ( self ) -> str:
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token
def lowerCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ = {self.convert_ids_to_tokens(a__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCAmelCase__ ( self , a__ ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(a__ , out_type=a__ )
def lowerCAmelCase__ ( self , a__ ) -> int:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
snake_case_ = self.sp_model.PieceToId(a__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def lowerCAmelCase__ ( self , a__ ) -> Tuple:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def lowerCAmelCase__ ( self , a__ ) -> Optional[int]:
'''simple docstring'''
snake_case_ = "".join(a__ ).replace(a__ , " " ).strip()
return out_string
def lowerCAmelCase__ ( self , a__ , a__ = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(a__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
snake_case_ = os.path.join(
a__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , a__ )
elif not os.path.isfile(self.vocab_file ):
with open(a__ , "wb" ) as fi:
snake_case_ = self.sp_model.serialized_model_proto()
fi.write(a__ )
return (out_vocab_file,)
| 400 | 1 |
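# Illustration of the fairseq/SentencePiece alignment handled above: spm id 0
# is "<unk>", so every spm piece id is shifted up by fairseq_offset (1) while
# the four control tokens keep fixed ids. Hypothetical round trip (real ids
# depend on the vocab file):
#   _convert_token_to_id("<s>")     -> 0  (fairseq table)
#   _convert_token_to_id("▁Hello")  -> sp_model.PieceToId("▁Hello") + 1
#   _convert_id_to_token(3)         -> "<unk>"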
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/electra-small-generator''': (
'''https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt'''
),
'''google/electra-base-generator''': '''https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt''',
'''google/electra-large-generator''': (
'''https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt'''
),
'''google/electra-small-discriminator''': (
'''https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt'''
),
'''google/electra-base-discriminator''': (
'''https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt'''
),
'''google/electra-large-discriminator''': (
'''https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''google/electra-small-generator''': (
'''https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json'''
),
'''google/electra-base-generator''': (
'''https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json'''
),
'''google/electra-large-generator''': (
'''https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json'''
),
'''google/electra-small-discriminator''': (
'''https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json'''
),
'''google/electra-base-discriminator''': (
'''https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json'''
),
'''google/electra-large-discriminator''': (
'''https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''google/electra-small-generator''': 5_1_2,
'''google/electra-base-generator''': 5_1_2,
'''google/electra-large-generator''': 5_1_2,
'''google/electra-small-discriminator''': 5_1_2,
'''google/electra-base-discriminator''': 5_1_2,
'''google/electra-large-discriminator''': 5_1_2,
}
PRETRAINED_INIT_CONFIGURATION = {
'''google/electra-small-generator''': {'''do_lower_case''': True},
'''google/electra-base-generator''': {'''do_lower_case''': True},
'''google/electra-large-generator''': {'''do_lower_case''': True},
'''google/electra-small-discriminator''': {'''do_lower_case''': True},
'''google/electra-base-discriminator''': {'''do_lower_case''': True},
'''google/electra-large-discriminator''': {'''do_lower_case''': True},
}
class SCREAMING_SNAKE_CASE_ ( PreTrainedTokenizerFast ):
'''simple docstring'''
lowercase : Tuple = VOCAB_FILES_NAMES
lowercase : List[Any] = PRETRAINED_VOCAB_FILES_MAP
lowercase : Dict = PRETRAINED_INIT_CONFIGURATION
lowercase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase : List[Any] = ElectraTokenizer
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , SCREAMING_SNAKE_CASE__ : Any=None , SCREAMING_SNAKE_CASE__ : List[str]=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]="[UNK]" , SCREAMING_SNAKE_CASE__ : str="[SEP]" , SCREAMING_SNAKE_CASE__ : int="[PAD]" , SCREAMING_SNAKE_CASE__ : Dict="[CLS]" , SCREAMING_SNAKE_CASE__ : str="[MASK]" , SCREAMING_SNAKE_CASE__ : int=True , SCREAMING_SNAKE_CASE__ : Any=None , **SCREAMING_SNAKE_CASE__ : List[str] , ) -> Tuple:
super().__init__(
SCREAMING_SNAKE_CASE__ , tokenizer_file=SCREAMING_SNAKE_CASE__ , do_lower_case=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , tokenize_chinese_chars=SCREAMING_SNAKE_CASE__ , strip_accents=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
A : Optional[Any] =json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , SCREAMING_SNAKE_CASE__ ) != do_lower_case
or normalizer_state.get('strip_accents' , SCREAMING_SNAKE_CASE__ ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , SCREAMING_SNAKE_CASE__ ) != tokenize_chinese_chars
):
A : Union[str, Any] =getattr(SCREAMING_SNAKE_CASE__ , normalizer_state.pop('type' ) )
A : Any =do_lower_case
A : Any =strip_accents
A : int =tokenize_chinese_chars
A : Tuple =normalizer_class(**SCREAMING_SNAKE_CASE__ )
A : Tuple =do_lower_case
def SCREAMING_SNAKE_CASE_ ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Any=None ) -> Tuple:
A : int =[self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ) -> List[int]:
A : List[str] =[self.sep_token_id]
A : int =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ) -> Tuple[str]:
A : Dict =self._tokenizer.model.save(SCREAMING_SNAKE_CASE__ , name=SCREAMING_SNAKE_CASE__ )
return tuple(SCREAMING_SNAKE_CASE__ )
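# Worked example of create_token_type_ids_from_sequences above: for a pair
# (A, B) the segment ids cover [CLS] A [SEP] with 0s and B [SEP] with 1s.
# With hypothetical token id lists a = [7, 8] and b = [9]:
#   [CLS] 7  8  [SEP] 9  [SEP]
#    0    0  0   0    1   1     -> [0, 0, 0, 0, 1, 1]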
| 661 | __version__ = '''0.21.0'''
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 661 | 1 |
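# Typical use of find_executable_batch_size re-exported above: the decorator
# retries the wrapped function with a halved batch size whenever it raises an
# out-of-memory error. A sketch (the training body is a placeholder):
from accelerate import find_executable_batch_size

@find_executable_batch_size(starting_batch_size=128)
def train(batch_size):
    # batch_size is injected by the decorator and shrinks on CUDA OOM
    run_training_loop(batch_size)  # hypothetical helper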
"""simple docstring"""
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
UpperCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
class UpperCamelCase__ ( DiffusionPipeline ):
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , ) -> Union[str, Any]:
super().__init__()
self.register_modules(
vae=SCREAMING_SNAKE_CASE__ , text_encoder=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ , unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ , safety_checker=SCREAMING_SNAKE_CASE__ , feature_extractor=SCREAMING_SNAKE_CASE__ , )
def snake_case__ ( self , SCREAMING_SNAKE_CASE__ = "auto" ) -> int:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
A__ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(SCREAMING_SNAKE_CASE__ )
def snake_case__ ( self ) -> Union[str, Any]:
self.enable_attention_slicing(SCREAMING_SNAKE_CASE__ )
@torch.no_grad()
def __call__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = 512 , SCREAMING_SNAKE_CASE__ = 512 , SCREAMING_SNAKE_CASE__ = 50 , SCREAMING_SNAKE_CASE__ = 7.5 , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = 1 , SCREAMING_SNAKE_CASE__ = 0.0 , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = "pil" , SCREAMING_SNAKE_CASE__ = True , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = 1 , SCREAMING_SNAKE_CASE__ = None , **SCREAMING_SNAKE_CASE__ , ) -> Dict:
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
A__ = 1
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
A__ = len(SCREAMING_SNAKE_CASE__ )
else:
raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(SCREAMING_SNAKE_CASE__ )}""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) or callback_steps <= 0)
):
raise ValueError(
f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
f""" {type(SCREAMING_SNAKE_CASE__ )}.""" )
# get prompt text embeddings
A__ = self.tokenizer(
SCREAMING_SNAKE_CASE__ , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , )
A__ = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
A__ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
A__ = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
A__ = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
A__ , A__ , A__ = text_embeddings.shape
A__ = text_embeddings.repeat(1 , SCREAMING_SNAKE_CASE__ , 1 )
A__ = text_embeddings.view(bs_embed * num_images_per_prompt , SCREAMING_SNAKE_CASE__ , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
A__ = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
A__ = 42
if negative_prompt is None:
A__ = [""]
elif type(SCREAMING_SNAKE_CASE__ ) is not type(SCREAMING_SNAKE_CASE__ ):
raise TypeError(
f"""`negative_prompt` should be the same type to `prompt`, but got {type(SCREAMING_SNAKE_CASE__ )} !="""
f""" {type(SCREAMING_SNAKE_CASE__ )}.""" )
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
A__ = [negative_prompt]
elif batch_size != len(SCREAMING_SNAKE_CASE__ ):
raise ValueError(
f"""`negative_prompt`: {negative_prompt} has batch size {len(SCREAMING_SNAKE_CASE__ )}, but `prompt`:"""
f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
" the batch size of `prompt`." )
else:
A__ = negative_prompt
A__ = text_input_ids.shape[-1]
A__ = self.tokenizer(
SCREAMING_SNAKE_CASE__ , padding="max_length" , max_length=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , return_tensors="pt" , )
A__ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
A__ = uncond_embeddings.shape[1]
A__ = uncond_embeddings.repeat(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 1 )
A__ = uncond_embeddings.view(batch_size * num_images_per_prompt , SCREAMING_SNAKE_CASE__ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
A__ = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
A__ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
A__ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
A__ = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
A__ = torch.randn(
SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , device="cpu" , dtype=SCREAMING_SNAKE_CASE__ ).to(self.device )
A__ = torch.randn(SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , device="cpu" , dtype=SCREAMING_SNAKE_CASE__ ).to(
self.device )
else:
A__ = torch.randn(
SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , device=self.device , dtype=SCREAMING_SNAKE_CASE__ )
A__ = torch.randn(SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , device=self.device , dtype=SCREAMING_SNAKE_CASE__ )
else:
if latents_reference.shape != latents_shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
A__ = latents_reference.to(self.device )
A__ = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
A__ = (latents_shape[3] - latents_shape_reference[3]) // 2
A__ = (latents_shape[2] - latents_shape_reference[2]) // 2
A__ = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
A__ = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
A__ = 0 if dx < 0 else dx
A__ = 0 if dy < 0 else dy
A__ = max(-dx , 0 )
A__ = max(-dy , 0 )
# import pdb
# pdb.set_trace()
A__ = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
A__ = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
A__ = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
A__ = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
A__ = {}
if accepts_eta:
A__ = eta
for i, t in enumerate(self.progress_bar(SCREAMING_SNAKE_CASE__ ) ):
# expand the latents if we are doing classifier free guidance
A__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
A__ = self.scheduler.scale_model_input(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# predict the noise residual
A__ = self.unet(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , encoder_hidden_states=SCREAMING_SNAKE_CASE__ ).sample
# perform guidance
if do_classifier_free_guidance:
A__ , A__ = noise_pred.chunk(2 )
A__ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
A__ = self.scheduler.step(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
A__ = 1 / 0.1_8_2_1_5 * latents
A__ = self.vae.decode(SCREAMING_SNAKE_CASE__ ).sample
A__ = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
A__ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
A__ = self.feature_extractor(self.numpy_to_pil(SCREAMING_SNAKE_CASE__ ) , return_tensors="pt" ).to(
self.device )
A__ , A__ = self.safety_checker(
images=SCREAMING_SNAKE_CASE__ , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
A__ = None
if output_type == "pil":
A__ = self.numpy_to_pil(SCREAMING_SNAKE_CASE__ )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=SCREAMING_SNAKE_CASE__ , nsfw_content_detected=SCREAMING_SNAKE_CASE__ )
| 104 |
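# The guidance step in the pipeline above is classifier-free guidance: one
# batched UNet call yields unconditional and text-conditioned noise estimates,
# blended as e = e_uncond + w * (e_text - e_uncond). A tensor-level sketch of
# just that blend (shapes are illustrative):
import torch

noise_pred = torch.randn(2, 4, 64, 64)  # stacked [uncond, text] batch
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
guidance_scale = 7.5
guided = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# w = 1 recovers the conditional prediction; w > 1 extrapolates toward the prompt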
'''simple docstring'''
from __future__ import annotations
class UpperCAmelCase :
"""simple docstring"""
def __init__( self : int , UpperCamelCase__ : list[list[int]] ) -> Tuple:
_UpperCamelCase =TypeError(
'''Matrices must be formed from a list of zero or more lists containing at '''
'''least one and the same number of values, each of which must be of type '''
'''int or float.''' )
if len(UpperCamelCase__ ) != 0:
_UpperCamelCase =len(rows[0] )
if cols == 0:
raise error
for row in rows:
if len(UpperCamelCase__ ) != cols:
raise error
for value in row:
if not isinstance(UpperCamelCase__ , (int, float) ):
raise error
_UpperCamelCase =rows
else:
_UpperCamelCase =[]
def UpperCamelCase__ ( self : Optional[int] ) -> list[list[int]]:
return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )]
@property
def UpperCamelCase__ ( self : int ) -> int:
return len(self.rows )
@property
def UpperCamelCase__ ( self : Union[str, Any] ) -> int:
return len(self.rows[0] )
@property
def UpperCamelCase__ ( self : Union[str, Any] ) -> tuple[int, int]:
return (self.num_rows, self.num_columns)
@property
def UpperCamelCase__ ( self : List[Any] ) -> bool:
return self.order[0] == self.order[1]
def UpperCamelCase__ ( self : Dict ) -> Matrix:
_UpperCamelCase =[
[0 if column_num != row_num else 1 for column_num in range(self.num_rows )]
for row_num in range(self.num_rows )
]
return Matrix(UpperCamelCase__ )
def UpperCamelCase__ ( self : Tuple ) -> int:
if not self.is_square:
return 0
if self.order == (0, 0):
return 1
if self.order == (1, 1):
return int(self.rows[0][0] )
if self.order == (2, 2):
return int(
(self.rows[0][0] * self.rows[1][1])
- (self.rows[0][1] * self.rows[1][0]) )
else:
return sum(
self.rows[0][column] * self.cofactors().rows[0][column]
for column in range(self.num_columns ) )
def UpperCamelCase__ ( self : Union[str, Any] ) -> bool:
return bool(self.determinant() )
def UpperCamelCase__ ( self : str , UpperCamelCase__ : int , UpperCamelCase__ : int ) -> int:
_UpperCamelCase =[
[
self.rows[other_row][other_column]
for other_column in range(self.num_columns )
if other_column != column
]
for other_row in range(self.num_rows )
if other_row != row
]
return Matrix(UpperCamelCase__ ).determinant()
def UpperCamelCase__ ( self : Union[str, Any] , UpperCamelCase__ : int , UpperCamelCase__ : int ) -> int:
if (row + column) % 2 == 0:
return self.get_minor(UpperCamelCase__ , UpperCamelCase__ )
return -1 * self.get_minor(UpperCamelCase__ , UpperCamelCase__ )
def UpperCamelCase__ ( self : List[Any] ) -> Matrix:
return Matrix(
[
[self.get_minor(UpperCamelCase__ , UpperCamelCase__ ) for column in range(self.num_columns )]
for row in range(self.num_rows )
] )
def UpperCamelCase__ ( self : List[Any] ) -> Matrix:
return Matrix(
[
[
self.minors().rows[row][column]
if (row + column) % 2 == 0
else self.minors().rows[row][column] * -1
for column in range(self.minors().num_columns )
]
for row in range(self.minors().num_rows )
] )
def UpperCamelCase__ ( self : Optional[int] ) -> Matrix:
_UpperCamelCase =[
[self.cofactors().rows[column][row] for column in range(self.num_columns )]
for row in range(self.num_rows )
]
return Matrix(UpperCamelCase__ )
def UpperCamelCase__ ( self : Any ) -> Matrix:
_UpperCamelCase =self.determinant()
if not determinant:
raise TypeError('''Only matrices with a non-zero determinant have an inverse''' )
return self.adjugate() * (1 / determinant)
def __repr__( self : Union[str, Any] ) -> str:
return str(self.rows )
def __str__( self : List[str] ) -> str:
if self.num_rows == 0:
return "[]"
if self.num_rows == 1:
return "[[" + ". ".join(str(self.rows[0] ) ) + "]]"
return (
"["
+ "\n ".join(
[
                    '''[''' + '''. '''.join([str(value ) for value in row] ) + '''.]'''
for row in self.rows
] )
+ "]"
)
def UpperCamelCase__ ( self : List[Any] , UpperCamelCase__ : list[int] , UpperCamelCase__ : int | None = None ) -> None:
_UpperCamelCase =TypeError('''Row must be a list containing all ints and/or floats''' )
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
raise type_error
for value in row:
if not isinstance(UpperCamelCase__ , (int, float) ):
raise type_error
if len(UpperCamelCase__ ) != self.num_columns:
raise ValueError(
'''Row must be equal in length to the other rows in the matrix''' )
if position is None:
self.rows.append(UpperCamelCase__ )
else:
_UpperCamelCase =self.rows[0:position] + [row] + self.rows[position:]
def UpperCamelCase__ ( self : Dict , UpperCamelCase__ : list[int] , UpperCamelCase__ : int | None = None ) -> None:
_UpperCamelCase =TypeError(
'''Column must be a list containing all ints and/or floats''' )
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
raise type_error
for value in column:
if not isinstance(UpperCamelCase__ , (int, float) ):
raise type_error
if len(UpperCamelCase__ ) != self.num_rows:
raise ValueError(
'''Column must be equal in length to the other columns in the matrix''' )
if position is None:
_UpperCamelCase =[self.rows[i] + [column[i]] for i in range(self.num_rows )]
else:
_UpperCamelCase =[
self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
for i in range(self.num_rows )
]
def __eq__( self : Any , UpperCamelCase__ : object ) -> bool:
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
return NotImplemented
return self.rows == other.rows
def __ne__( self : Dict , UpperCamelCase__ : object ) -> bool:
return not self == other
def __neg__( self : Union[str, Any] ) -> Matrix:
return self * -1
def __add__( self : List[Any] , UpperCamelCase__ : Matrix ) -> Matrix:
if self.order != other.order:
raise ValueError('''Addition requires matrices of the same order''' )
return Matrix(
[
[self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __sub__( self : Optional[int] , UpperCamelCase__ : Matrix ) -> Matrix:
if self.order != other.order:
raise ValueError('''Subtraction requires matrices of the same order''' )
return Matrix(
[
[self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __mul__( self : List[Any] , UpperCamelCase__ : Matrix | int | float ) -> Matrix:
if isinstance(UpperCamelCase__ , (int, float) ):
return Matrix(
[[int(element * other ) for element in row] for row in self.rows] )
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
if self.num_columns != other.num_rows:
raise ValueError(
'''The number of columns in the first matrix must '''
'''be equal to the number of rows in the second''' )
return Matrix(
[
                    [Matrix.dot_product(row , column ) for column in other.columns()]
for row in self.rows
] )
else:
raise TypeError(
'''A Matrix can only be multiplied by an int, float, or another matrix''' )
def __pow__( self : Any , UpperCamelCase__ : int ) -> Matrix:
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
raise TypeError('''A Matrix can only be raised to the power of an int''' )
if not self.is_square:
raise ValueError('''Only square matrices can be raised to a power''' )
if other == 0:
return self.identity()
if other < 0:
if self.is_invertable():
return self.inverse() ** (-other)
raise ValueError(
'''Only invertable matrices can be raised to a negative power''' )
_UpperCamelCase =self
for _ in range(other - 1 ):
result *= self
return result
@classmethod
def UpperCamelCase__ ( cls : int , UpperCamelCase__ : list[int] , UpperCamelCase__ : list[int] ) -> int:
return sum(row[i] * column[i] for i in range(len(UpperCamelCase__ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 404 | 0 |
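# Worked example of the first-row cofactor expansion used by determinant()
# above, done by hand for a 3x3 matrix:
#   | 1 2 3 |
#   | 4 5 6 |   det = 1*(5*0 - 6*8) - 2*(4*0 - 6*7) + 3*(4*8 - 5*7)
#   | 7 8 0 |       = -48 + 84 - 9 = 27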
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def check_correctness( check_program , timeout , task_id , completion_id ):
    """simple docstring"""
    manager = multiprocessing.Manager()
    result = manager.list()
    p = multiprocessing.Process(target=unsafe_execute , args=(check_program, result, timeout) )
    p.start()
    p.join(timeout=timeout + 1 )
    if p.is_alive():
        p.kill()
    if not result:
        result.append("timed out" )
    return {
        "task_id": task_id,
        "passed": result[0] == "passed",
        "result": result[0],
        "completion_id": completion_id,
    }
def unsafe_execute( check_program , result , timeout ):
    """simple docstring"""
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil
        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir
        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()
        # Run program.
        try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout ):
                    exec(check_program , exec_globals )
            result.append("passed" )
        except TimeoutException:
            result.append("timed out" )
        except BaseException as e:
            result.append(F"failed: {e}" )
        # Needed for cleaning up.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir
@contextlib.contextmanager
def time_limit( seconds ):
    """simple docstring"""
    def signal_handler(signum , frame ):
        raise TimeoutException("Timed out!" )
    signal.setitimer(signal.ITIMER_REAL , seconds )
    signal.signal(signal.SIGALRM , signal_handler )
try:
yield
finally:
signal.setitimer(signal.ITIMER_REAL , 0 )
@contextlib.contextmanager
def swallow_io( ):
    """simple docstring"""
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream ):
        with contextlib.redirect_stderr(stream ):
            with redirect_stdin(stream ):
                yield
@contextlib.contextmanager
def create_tempdir( ):
    """simple docstring"""
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname ):
            yield dirname
class TimeoutException( Exception ):
    pass
class WriteOnlyStringIO( io.StringIO ):
    def read(self , *args , **kwargs ):
        raise OSError
    def readline(self , *args , **kwargs ):
        raise OSError
    def readlines(self , *args , **kwargs ):
        raise OSError
    def readable(self , *args , **kwargs ):
        return False
class redirect_stdin( contextlib._RedirectStream ):  # type: ignore
    _stream = "stdin"
@contextlib.contextmanager
def chdir( root ):
    """simple docstring"""
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root )
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        os.chdir(cwd )
def reliability_guard( maximum_memory_bytes=None ):
"""simple docstring"""
if maximum_memory_bytes is not None:
import resource
resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) )
resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) )
if not platform.uname().system == "Darwin":
resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) )
faulthandler.disable()
import builtins
UpperCamelCase = None
UpperCamelCase = None
import os
UpperCamelCase = "1"
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
import shutil
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
import subprocess
UpperCamelCase = None # type: ignore
UpperCamelCase = None
import sys
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
| 714 |
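# Sketch of driving the sandbox above, HumanEval-style (the program string
# and ids are illustrative):
check_program = "def add(a, b):\n    return a + b\n\nassert add(1, 2) == 3\n"
result = check_correctness(check_program, timeout=3.0, task_id=0, completion_id=0)
# expected: {"task_id": 0, "passed": True, "result": "passed", "completion_id": 0}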
"""simple docstring"""
import argparse
from collections import defaultdict
def overwrite_file( file , class_name , test_name , correct_line , done_test ):
    """simple docstring"""
    _id = F"{file}_{class_name}_{test_name}"
    done_test[_id] += 1
    with open(file , "r" ) as f:
        lines = f.readlines()
    class_regex = F"class {class_name}("
    test_regex = F"{4 * ' '}def {test_name}("
    line_begin_regex = F"{8 * ' '}{correct_line.split()[0]}"
    another_line_begin_regex = F"{16 * ' '}{correct_line.split()[0]}"
    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0
    new_lines = []
    for line in lines:
        if line.startswith(class_regex ):
            in_class = True
        elif in_class and line.startswith(test_regex ):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex ) or line.startswith(another_line_begin_regex )):
            spaces = len(line.split(correct_line.split()[0] )[0] )
            count += 1
            if count == done_test[_id]:
                in_line = True
        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True
        if in_class and in_func and in_line and insert_line:
            new_lines.append(F"{spaces * ' '}{correct_line}" )
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line )
    with open(file , "w" ) as f:
        for line in new_lines:
            f.write(line )
def main( correct , fail=None ):
    """simple docstring"""
    if fail is not None:
        with open(fail , "r" ) as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None
    with open(correct , "r" ) as f:
        correct_lines = f.readlines()
    done_tests = defaultdict(int )
    for line in correct_lines:
        file , class_name , test_name , correct_line = line.split(";" )
        if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
            overwrite_file(file , class_name , test_name , correct_line , done_tests )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--correct_filename''', help='''filename of tests with expected result''')
parser.add_argument('''--fail_filename''', help='''filename of test failures''', type=str, default=None)
    args = parser.parse_args()
main(args.correct_filename, args.fail_filename)
| 544 | 0 |
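# Expected input formats, inferred from the parsing above (the paths are
# illustrative): --correct_filename holds one ;-separated record per fix,
#
#   tests/test_foo.py;FooModelTest;test_forward;self.assertEqual(out.shape, (1, 8))
#
# and --fail_filename holds one "file::class::test" id per line to filter on.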
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """sentencepiece.bpe.model"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model""",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""camembert-base""": 512,
}
lowerCamelCase__ = """▁"""
class __SCREAMING_SNAKE_CASE ( PreTrainedTokenizer ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :List[Any] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ :Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ :Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE__ :str = ['input_ids', 'attention_mask']
def __init__( self : int , __a : int , __a : Tuple="<s>" , __a : List[str]="</s>" , __a : int="</s>" , __a : Any="<s>" , __a : Union[str, Any]="<unk>" , __a : List[Any]="<pad>" , __a : Optional[Any]="<mask>" , __a : Any=["<s>NOTUSED", "</s>NOTUSED"] , __a : Any = None , **__a : Any , ) -> Dict:
_UpperCamelCase : Dict = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else mask_token
_UpperCamelCase : int = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , additional_special_tokens=SCREAMING_SNAKE_CASE_ , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE_ , )
_UpperCamelCase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(SCREAMING_SNAKE_CASE_ ) )
_UpperCamelCase : Optional[Any] = vocab_file
# HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
# sentencepiece vocabulary (this is the case for <s> and </s>
_UpperCamelCase : int = {"""<s>NOTUSED""": 0, """<pad>""": 1, """</s>NOTUSED""": 2, """<unk>""": 3}
_UpperCamelCase : Union[str, Any] = len(self.fairseq_tokens_to_ids )
_UpperCamelCase : Any = len(self.sp_model ) + len(self.fairseq_tokens_to_ids )
_UpperCamelCase : Union[str, Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __SCREAMING_SNAKE_CASE ( self : str , __a : Any , __a : Tuple = None ) -> Dict:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_UpperCamelCase : Tuple = [self.cls_token_id]
_UpperCamelCase : Dict = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __SCREAMING_SNAKE_CASE ( self : str , __a : Union[str, Any] , __a : List[Any] = None , __a : Optional[int] = False ) -> Optional[Any]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE_ , token_ids_a=SCREAMING_SNAKE_CASE_ , already_has_special_tokens=SCREAMING_SNAKE_CASE_ )
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1]
return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1]
def __SCREAMING_SNAKE_CASE ( self : Any , __a : Dict , __a : Any = None ) -> Dict:
_UpperCamelCase : List[Any] = [self.sep_token_id]
_UpperCamelCase : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple:
return len(self.fairseq_tokens_to_ids ) + len(self.sp_model )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
_UpperCamelCase : Any = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : Optional[int] ) -> Optional[Any]:
return self.sp_model.encode(SCREAMING_SNAKE_CASE_ , out_type=SCREAMING_SNAKE_CASE_ )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : Any ) -> Tuple:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
elif self.sp_model.PieceToId(SCREAMING_SNAKE_CASE_ ) == 0:
# Convert sentence piece unk token to fairseq unk token index
return self.unk_token_id
return self.fairseq_offset + self.sp_model.PieceToId(SCREAMING_SNAKE_CASE_ )
def __SCREAMING_SNAKE_CASE ( self : Any , __a : Union[str, Any] ) -> Any:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def __SCREAMING_SNAKE_CASE ( self : str , __a : Optional[int] ) -> Any:
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in __a:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
def __getstate__( self : Optional[Any] ) -> Any:
_UpperCamelCase : List[str] = self.__dict__.copy()
_UpperCamelCase : int = None
return state
def __setstate__( self : Tuple , __a : Union[str, Any] ) -> Optional[int]:
_UpperCamelCase : Optional[int] = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
_UpperCamelCase : Union[str, Any] = {}
_UpperCamelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __SCREAMING_SNAKE_CASE ( self : str , __a : List[str] , __a : Tuple = None ) -> List[Any]:
if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
_UpperCamelCase : Optional[int] = os.path.join(
SCREAMING_SNAKE_CASE_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE_ )
elif not os.path.isfile(self.vocab_file ):
with open(SCREAMING_SNAKE_CASE_ , "wb" ) as fi:
_UpperCamelCase : Tuple = self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE_ )
return (out_vocab_file,)
| 624 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_mgp_str""": ["""MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MgpstrConfig"""],
"""processing_mgp_str""": ["""MgpstrProcessor"""],
"""tokenization_mgp_str""": ["""MgpstrTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_mgp_str"""] = [
"""MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MgpstrModel""",
"""MgpstrPreTrainedModel""",
"""MgpstrForSceneTextRecognition""",
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
lowerCamelCase__ : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 12 | 0 |
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = AlbertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--albert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained ALBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
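
# Example invocation (illustrative sketch; the paths below are hypothetical and
# must point at a real TF ALBERT dump):
#   python convert_albert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./albert_base/model.ckpt-best \
#       --albert_config_file ./albert_base/albert_config.json \
#       --pytorch_dump_path ./albert_base/pytorch_model.bin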
| 236 |
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class SummarizationDataProcessingTest(unittest.TestCase):
    def setUp(self):
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        """Pad the sequence with 0 if the sequence is smaller than the block size."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_fit_exactly(self):
        """Do nothing if the sequence is already the block size."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_too_big(self):
        """Truncate the sequence if it is longer than the block size."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_process_story_no_highlights(self):
        """Processing a story with no highlights returns an empty list for the summary."""
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and\n"
            "seventy-five.\n\nSpiritual revelations were conceded to England at that\n"
            "favoured period, as at this."
        )
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self):
        """An empty story returns empty collections of lines."""
        raw_story = ""
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])

    def test_process_story_with_missing_period(self):
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and "
            "seventy-five\n\nSpiritual revelations were conceded to England "
            "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
        )
        story_lines, summary_lines = process_story(raw_story)
        expected_story_lines = [
            "It was the year of Our Lord one thousand seven hundred and seventy-five.",
            "Spiritual revelations were conceded to England at that favoured period, as at this.",
        ]
        self.assertEqual(expected_story_lines, story_lines)
        expected_summary_lines = ["It was the best of times."]
        self.assertEqual(expected_summary_lines, summary_lines)

    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])
        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
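

# --- Illustrative sketch (not part of the test suite above) ---
# A minimal reference implementation of the `truncate_or_pad` contract the
# first three tests exercise; the real helper lives in `utils_summarization`
# and may differ in detail.
def _reference_truncate_or_pad(sequence, block_size, pad_token_id):
    if len(sequence) > block_size:
        return sequence[:block_size]
    return sequence + [pad_token_id] * (block_size - len(sequence))


assert _reference_truncate_or_pad([1, 2, 3], 5, 0) == [1, 2, 3, 0, 0]
assert _reference_truncate_or_pad(list(range(13)), 10, 0) == list(range(10))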
| 236 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = RobertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )


@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True
    all_model_classes = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 49 |
'''simple docstring'''
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=24,
        num_hidden_layers=2,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
        config = self.get_config()
        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels

    def get_config(self):
        return LiltConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict


@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)
        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)
        expected_shape = torch.Size([1, 2, 768])
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]], device=torch_device
        )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
| 244 | 0 |
"""simple docstring"""
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image, w, h):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]
    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image


def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    inputs_are_torch = False  # initialised here so plain numpy inputs do not raise NameError below
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()
    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1
    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)
    return v2


def spherical_dist_loss(x, y):
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)


def set_requires_grad(model, value):
    for param in model.parameters():
        param.requires_grad = value
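

# --- Illustrative sketch (not part of the pipeline) ---
# Tiny numeric check of `slerp`: halfway between two orthogonal unit vectors
# lies the 45-degree diagonal, still of unit length.
def _demo_slerp():
    v0 = np.array([1.0, 0.0])
    v1 = np.array([0.0, 1.0])
    mid = slerp(0.5, v0, v1)
    assert np.allclose(mid, np.array([2**-0.5, 2**-0.5]))
    assert np.isclose(np.linalg.norm(mid), 1.0)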
class CLIPGuidedImagesMixingStableDiffusion(DiffusionPipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        clip_model: CLIPModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler],
        feature_extractor: CLIPFeatureExtractor,
        coca_model=None,
        coca_tokenizer=None,
        coca_transform=None,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            clip_model=clip_model,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
            coca_model=coca_model,
            coca_tokenizer=coca_tokenizer,
            coca_transform=coca_transform,
        )
        self.feature_extractor_size = (
            feature_extractor.size
            if isinstance(feature_extractor.size, int)
            else feature_extractor.size["shortest_edge"]
        )
        self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
        set_requires_grad(self.text_encoder, False)
        set_requires_grad(self.clip_model, False)

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    def freeze_vae(self):
        set_requires_grad(self.vae, False)

    def unfreeze_vae(self):
        set_requires_grad(self.vae, True)

    def freeze_unet(self):
        set_requires_grad(self.unet, False)

    def unfreeze_unet(self):
        set_requires_grad(self.unet, True)

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, torch.Tensor):
            raise ValueError(f"`image` has to be of type `torch.Tensor` but is {type(image)}")
        image = image.to(device=device, dtype=dtype)
        if isinstance(generator, list):
            init_latents = [
                self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
            ]
            init_latents = torch.cat(init_latents, dim=0)
        else:
            init_latents = self.vae.encode(image).latent_dist.sample(generator)
        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        init_latents = 0.18215 * init_latents
        init_latents = init_latents.repeat_interleave(batch_size, dim=0)
        noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype)
        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents
        return latents

    def get_image_description(self, image):
        transformed_image = self.coca_transform(image).unsqueeze(0)
        with torch.no_grad(), torch.cuda.amp.autocast():
            generated = self.coca_model.generate(
                transformed_image.to(device=self.device, dtype=self.coca_model.dtype)
            )
        generated = self.coca_tokenizer.decode(generated[0].cpu().numpy())
        return generated.split("<end_of_text>")[0].replace("<start_of_text>", "").rstrip(" .,")

    def get_clip_image_embeddings(self, image, batch_size):
        clip_image_input = self.feature_extractor.preprocess(image)
        clip_image_features = torch.from_numpy(clip_image_input["pixel_values"][0]).unsqueeze(0).to(self.device).half()
        image_embeddings_clip = self.clip_model.get_image_features(clip_image_features)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
        image_embeddings_clip = image_embeddings_clip.repeat_interleave(batch_size, dim=0)
        return image_embeddings_clip

    @torch.enable_grad()
    def cond_fn(
        self,
        latents,
        timestep,
        index,
        text_embeddings,
        noise_pred_original,
        original_image_embeddings_clip,
        clip_guidance_scale,
    ):
        latents = latents.detach().requires_grad_()
        latent_model_input = self.scheduler.scale_model_input(latents, timestep)
        # predict the noise residual
        noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings).sample
        if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
            alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
            beta_prod_t = 1 - alpha_prod_t
            # compute predicted original sample from predicted noise also called
            # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
            pred_original_sample = (latents - beta_prod_t**0.5 * noise_pred) / alpha_prod_t**0.5
            fac = torch.sqrt(beta_prod_t)
            sample = pred_original_sample * (fac) + latents * (1 - fac)
        elif isinstance(self.scheduler, LMSDiscreteScheduler):
            sigma = self.scheduler.sigmas[index]
            sample = latents - sigma * noise_pred
        else:
            raise ValueError(f"scheduler type {type(self.scheduler)} not supported")
        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        sample = 1 / 0.18215 * sample
        image = self.vae.decode(sample).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = transforms.Resize(self.feature_extractor_size)(image)
        image = self.normalize(image).to(latents.dtype)
        image_embeddings_clip = self.clip_model.get_image_features(image)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
        loss = spherical_dist_loss(image_embeddings_clip, original_image_embeddings_clip).mean() * clip_guidance_scale
        grads = -torch.autograd.grad(loss, latents)[0]
        if isinstance(self.scheduler, LMSDiscreteScheduler):
            latents = latents.detach() + grads * (sigma**2)
            noise_pred = noise_pred_original
        else:
            noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads
        return noise_pred, latents
    @torch.no_grad()
    def __call__(
        self,
        style_image: Union[torch.FloatTensor, PIL.Image.Image],
        content_image: Union[torch.FloatTensor, PIL.Image.Image],
        style_prompt: Optional[str] = None,
        content_prompt: Optional[str] = None,
        height: Optional[int] = 512,
        width: Optional[int] = 512,
        noise_strength: float = 0.6,
        num_inference_steps: Optional[int] = 50,
        guidance_scale: Optional[float] = 7.5,
        batch_size: Optional[int] = 1,
        eta: float = 0.0,
        clip_guidance_scale: Optional[float] = 100,
        generator: Optional[torch.Generator] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        slerp_latent_style_strength: float = 0.8,
        slerp_prompt_style_strength: float = 0.1,
        slerp_clip_image_style_strength: float = 0.1,
    ):
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(f"You have passed {batch_size} batch_size, but only {len(generator)} generators.")
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
        if isinstance(generator, torch.Generator) and batch_size > 1:
            generator = [generator] + [None] * (batch_size - 1)
        coca_is_none = [
            ("model", self.coca_model is None),
            ("tokenizer", self.coca_tokenizer is None),
            ("transform", self.coca_transform is None),
        ]
        coca_is_none = [x[0] for x in coca_is_none if x[1]]
        coca_is_none_str = ", ".join(coca_is_none)
        # generate prompts with coca model if prompt is None
        if content_prompt is None:
            if len(coca_is_none):
                raise ValueError(
                    f"Content prompt is None and CoCa [{coca_is_none_str}] is None."
                    f"Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline."
                )
            content_prompt = self.get_image_description(content_image)
        if style_prompt is None:
            if len(coca_is_none):
                raise ValueError(
                    f"Style prompt is None and CoCa [{coca_is_none_str}] is None."
                    f" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline."
                )
            style_prompt = self.get_image_description(style_image)
        # get prompt text embeddings for content and style
        content_text_input = self.tokenizer(
            content_prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        content_text_embeddings = self.text_encoder(content_text_input.input_ids.to(self.device))[0]
        style_text_input = self.tokenizer(
            style_prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        style_text_embeddings = self.text_encoder(style_text_input.input_ids.to(self.device))[0]
        text_embeddings = slerp(slerp_prompt_style_strength, content_text_embeddings, style_text_embeddings)
        # duplicate text embeddings for each generation per prompt
        text_embeddings = text_embeddings.repeat_interleave(batch_size, dim=0)
        # set timesteps
        accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
        extra_set_kwargs = {}
        if accepts_offset:
            extra_set_kwargs["offset"] = 1
        self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        self.scheduler.timesteps.to(self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, noise_strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)
        # Preprocess image
        preprocessed_content_image = preprocess(content_image, width, height)
        content_latents = self.prepare_latents(
            preprocessed_content_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator
        )
        preprocessed_style_image = preprocess(style_image, width, height)
        style_latents = self.prepare_latents(
            preprocessed_style_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator
        )
        latents = slerp(slerp_latent_style_strength, content_latents, style_latents)
        if clip_guidance_scale > 0:
            content_clip_image_embedding = self.get_clip_image_embeddings(content_image, batch_size)
            style_clip_image_embedding = self.get_clip_image_embeddings(style_image, batch_size)
            clip_image_embeddings = slerp(
                slerp_clip_image_style_strength, content_clip_image_embedding, style_clip_image_embedding
            )
        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            max_length = content_text_input.input_ids.shape[-1]
            uncond_input = self.tokenizer([""], padding="max_length", max_length=max_length, return_tensors="pt")
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
            # duplicate unconditional embeddings for each generation per prompt
            uncond_embeddings = uncond_embeddings.repeat_interleave(batch_size, dim=0)
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not work reproducibly on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta
        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator
        with self.progress_bar(total=num_inference_steps):
            for i, t in enumerate(timesteps):
                # expand the latents if we are doing classifier free guidance
                latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
                # predict the noise residual
                noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
                # perform classifier free guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                # perform clip guidance
                if clip_guidance_scale > 0:
                    text_embeddings_for_guidance = (
                        text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings
                    )
                    noise_pred, latents = self.cond_fn(
                        latents,
                        t,
                        i,
                        text_embeddings_for_guidance,
                        noise_pred,
                        clip_image_embeddings,
                        clip_guidance_scale,
                    )
                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image, None)
        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
| 317 |
"""simple docstring"""
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
    ["attention", "attn"],
    ["encoder_attention", "encoder_attn"],
    ["q_lin", "q_proj"],
    ["k_lin", "k_proj"],
    ["v_lin", "v_proj"],
    ["out_lin", "out_proj"],
    ["norm_embeddings", "layernorm_embedding"],
    ["position_embeddings", "embed_positions"],
    ["embeddings", "embed_tokens"],
    ["ffn.lin", "fc"],
]
def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"
    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)
    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k
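

# Worked example (illustrative, not part of the conversion script): tracing one
# key through the substitutions above — "attention" -> "attn", "q_lin" ->
# "q_proj", then the encoder-specific ".attn" -> ".self_attn" rewrite.
assert rename_state_dict_key("encoder.layers.0.attention.q_lin.weight") == (
    "encoder.layers.0.self_attn.q_proj.weight"
)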
def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v


IGNORE_KEYS = ["START"]
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
    parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
    parser.add_argument(
        "--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
    )
    args = parser.parse_args()
    convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 317 | 1 |
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

UMT5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
    # See all umt5 models at https://huggingface.co/models?filter=umt5
}
class UMT5Config(PretrainedConfig):
    model_type = "umt5"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=250112,
        d_model=512,
        d_kv=64,
        d_ff=1024,
        num_layers=8,
        num_decoder_layers=None,
        num_heads=6,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="gated-gelu",
        is_encoder_decoder=True,
        use_cache=True,
        tokenizer_class="T5Tokenizer",
        tie_word_embeddings=True,
        pad_token_id=0,
        eos_token_id=1,
        decoder_start_token_id=0,
        **kwargs,
    ):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder,
            tokenizer_class=tokenizer_class,
            tie_word_embeddings=tie_word_embeddings,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"
        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

    @property
    def hidden_size(self):
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.num_heads

    @property
    def num_hidden_layers(self):
        return self.num_layers


class UMT5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self) -> int:
        return 13

    @property
    def atol_for_validation(self) -> float:
        return 5e-4
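

if __name__ == "__main__":
    # Illustrative sketch: instantiating the config parses the activation spec,
    # so "gated-gelu" yields a gated feed-forward block with the "gelu_new" kernel.
    _cfg = UMT5Config(feed_forward_proj="gated-gelu")
    assert _cfg.is_gated_act and _cfg.dense_act_fn == "gelu_new"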
| 267 |
"""simple docstring"""
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, txt_tok, att_mask):
        embs = self.transformer(input_ids=txt_tok, attention_mask=att_mask)[0]
        embs = (embs * att_mask.unsqueeze(2)).sum(dim=1) / att_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(embs), embs
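

# --- Illustrative sketch (not part of the model) ---
# The forward pass above mean-pools token embeddings under the attention mask;
# a tiny standalone check of that pooling arithmetic:
def _demo_masked_mean_pooling():
    embs = torch.tensor([[[1.0, 1.0], [3.0, 3.0], [9.0, 9.0]]])  # (batch=1, seq=3, dim=2)
    att_mask = torch.tensor([[1.0, 1.0, 0.0]])  # third position is padding
    pooled = (embs * att_mask.unsqueeze(2)).sum(dim=1) / att_mask.sum(dim=1)[:, None]
    assert torch.allclose(pooled, torch.tensor([[2.0, 2.0]]))  # mean of the two real tokens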
| 595 | 0 |
'''simple docstring'''
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()


def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path, pytorch_dump_folder_path):
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    special_keys = ["key_proj", "value_proj", "query_proj"]
    mapping = {
        "self_attn": "ngram_self_attn",
        "cross_attn": "encoder_attn",
        "cross_attn_layer_norm": "encoder_attn_layer_norm",
        "feed_forward_layer_norm": "final_layer_norm",
        "feed_forward": "",
        "intermediate": "fc1",
        "output": "fc2",
        "key_proj": "k_proj",
        "query_proj": "q_proj",
        "value_proj": "v_proj",
        "word_embeddings": "embed_tokens",
        "embeddings_layer_norm": "emb_layer_norm",
        "relative_pos_embeddings": "relative_linear",
        "ngram_embeddings": "ngram_input_embed",
        "position_embeddings": "embed_positions",
    }
    for key in loading_info["missing_keys"]:
        attributes = key.split(".")
        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model
        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
            if not hasattr(old_model, attribute) and len(attribute) > 0:
                old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute
            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break
            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(attribute)]
            else:
                model = getattr(model, attribute)
                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)
        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")
    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--prophetnet_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 502 |
'''simple docstring'''
from collections.abc import Generator
def fibonacci_generator() -> Generator[int, None, None]:
    """Yield the Fibonacci numbers 1, 2, 3, 5, 8, ..."""
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci term to contain n digits."""
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1
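

# Example (illustrative): the first Fibonacci term with three digits is
# F(12) = 144, so the three-digit threshold is crossed at index 12.
assert solution(3) == 12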
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 502 | 1 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class snake_case__ ( metaclass=__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowerCamelCase = ["""flax"""]
def __init__( self : List[Any] , *UpperCamelCase__ : str , **UpperCamelCase__ : str ) -> Tuple:
"""simple docstring"""
requires_backends(self , ['''flax'''] )
@classmethod
def lowerCAmelCase ( cls : List[Any] , *UpperCamelCase__ : int , **UpperCamelCase__ : Optional[int] ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
@classmethod
def lowerCAmelCase ( cls : Optional[Any] , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : Any ) -> Any:
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
class snake_case__ ( metaclass=__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowerCamelCase = ["""flax"""]
def __init__( self : Optional[int] , *UpperCamelCase__ : List[Any] , **UpperCamelCase__ : int ) -> Dict:
"""simple docstring"""
requires_backends(self , ['''flax'''] )
@classmethod
def lowerCAmelCase ( cls : Any , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : List[str] ) -> List[str]:
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
@classmethod
def lowerCAmelCase ( cls : List[str] , *UpperCamelCase__ : Any , **UpperCamelCase__ : List[Any] ) -> Any:
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
class snake_case__ ( metaclass=__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowerCamelCase = ["""flax"""]
def __init__( self : str , *UpperCamelCase__ : int , **UpperCamelCase__ : Dict ) -> Any:
"""simple docstring"""
requires_backends(self , ['''flax'''] )
@classmethod
def lowerCAmelCase ( cls : Optional[int] , *UpperCamelCase__ : Dict , **UpperCamelCase__ : Optional[Any] ) -> int:
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
@classmethod
def lowerCAmelCase ( cls : List[Any] , *UpperCamelCase__ : Any , **UpperCamelCase__ : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
class snake_case__ ( metaclass=__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowerCamelCase = ["""flax"""]
def __init__( self : Optional[Any] , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : Optional[int] ) -> Optional[int]:
"""simple docstring"""
requires_backends(self , ['''flax'''] )
@classmethod
def lowerCAmelCase ( cls : Dict , *UpperCamelCase__ : Any , **UpperCamelCase__ : Union[str, Any] ) -> Dict:
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
@classmethod
def lowerCAmelCase ( cls : int , *UpperCamelCase__ : List[Any] , **UpperCamelCase__ : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
class snake_case__ ( metaclass=__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowerCamelCase = ["""flax"""]
def __init__( self : Union[str, Any] , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : str ) -> Optional[int]:
"""simple docstring"""
requires_backends(self , ['''flax'''] )
@classmethod
def lowerCAmelCase ( cls : Any , *UpperCamelCase__ : Any , **UpperCamelCase__ : int ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
@classmethod
def lowerCAmelCase ( cls : Tuple , *UpperCamelCase__ : int , **UpperCamelCase__ : str ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
class snake_case__ ( metaclass=DummyObject ):
"""simple docstring"""
lowerCamelCase = ["""flax"""]
def __init__( self : int , *UpperCamelCase__ : int , **UpperCamelCase__ : int ) -> int:
"""simple docstring"""
requires_backends(self , ['''flax'''] )
@classmethod
def lowerCAmelCase ( cls : Dict , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : int ) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
@classmethod
def lowerCAmelCase ( cls : str , *UpperCamelCase__ : int , **UpperCamelCase__ : List[str] ) -> int:
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
class snake_case__ ( metaclass=DummyObject ):
"""simple docstring"""
lowerCamelCase = ["""flax"""]
def __init__( self : Union[str, Any] , *UpperCamelCase__ : int , **UpperCamelCase__ : List[str] ) -> List[str]:
"""simple docstring"""
requires_backends(self , ['''flax'''] )
@classmethod
def lowerCAmelCase ( cls : str , *UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : Optional[Any] ) -> Tuple:
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
@classmethod
def lowerCAmelCase ( cls : Any , *UpperCamelCase__ : Tuple , **UpperCamelCase__ : Dict ) -> List[str]:
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
class snake_case__ ( metaclass=DummyObject ):
"""simple docstring"""
lowerCamelCase = ["""flax"""]
def __init__( self : List[Any] , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : Tuple ) -> Dict:
"""simple docstring"""
requires_backends(self , ['''flax'''] )
@classmethod
def lowerCAmelCase ( cls : Tuple , *UpperCamelCase__ : Optional[int] , **UpperCamelCase__ : Optional[int] ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
@classmethod
def lowerCAmelCase ( cls : int , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : Optional[Any] ) -> str:
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
class snake_case__ ( metaclass=DummyObject ):
"""simple docstring"""
lowerCamelCase = ["""flax"""]
def __init__( self : List[str] , *UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : int ) -> List[str]:
"""simple docstring"""
requires_backends(self , ['''flax'''] )
@classmethod
def lowerCAmelCase ( cls : Optional[int] , *UpperCamelCase__ : Optional[int] , **UpperCamelCase__ : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
@classmethod
def lowerCAmelCase ( cls : Tuple , *UpperCamelCase__ : str , **UpperCamelCase__ : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
class snake_case__ ( metaclass=DummyObject ):
"""simple docstring"""
lowerCamelCase = ["""flax"""]
def __init__( self : str , *UpperCamelCase__ : Any , **UpperCamelCase__ : Union[str, Any] ) -> int:
"""simple docstring"""
requires_backends(self , ['''flax'''] )
@classmethod
def lowerCAmelCase ( cls : List[Any] , *UpperCamelCase__ : Tuple , **UpperCamelCase__ : Dict ) -> List[str]:
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
@classmethod
def lowerCAmelCase ( cls : Optional[int] , *UpperCamelCase__ : Dict , **UpperCamelCase__ : Union[str, Any] ) -> Dict:
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
class snake_case__ ( metaclass=DummyObject ):
"""simple docstring"""
lowerCamelCase = ["""flax"""]
def __init__( self : str , *UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : Tuple ) -> Any:
"""simple docstring"""
requires_backends(self , ['''flax'''] )
@classmethod
def lowerCAmelCase ( cls : List[Any] , *UpperCamelCase__ : Optional[int] , **UpperCamelCase__ : List[Any] ) -> int:
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
@classmethod
def lowerCAmelCase ( cls : Any , *UpperCamelCase__ : Dict , **UpperCamelCase__ : Dict ) -> Tuple:
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
class snake_case__ ( metaclass=DummyObject ):
"""simple docstring"""
lowerCamelCase = ["""flax"""]
def __init__( self : Tuple , *UpperCamelCase__ : Any , **UpperCamelCase__ : Dict ) -> str:
"""simple docstring"""
requires_backends(self , ['''flax'''] )
@classmethod
def lowerCAmelCase ( cls : List[Any] , *UpperCamelCase__ : List[Any] , **UpperCamelCase__ : Optional[Any] ) -> Any:
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
@classmethod
def lowerCAmelCase ( cls : str , *UpperCamelCase__ : int , **UpperCamelCase__ : int ) -> int:
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
class snake_case__ ( metaclass=DummyObject ):
"""simple docstring"""
lowerCamelCase = ["""flax"""]
def __init__( self : List[Any] , *UpperCamelCase__ : Dict , **UpperCamelCase__ : List[str] ) -> str:
"""simple docstring"""
requires_backends(self , ['''flax'''] )
@classmethod
def lowerCAmelCase ( cls : List[Any] , *UpperCamelCase__ : Tuple , **UpperCamelCase__ : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
@classmethod
def lowerCAmelCase ( cls : Tuple , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : Tuple ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ['''flax'''] )
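# --- editor's illustrative sketch (added, not part of the original row): every
# dummy above fails fast when flax is missing; requires_backends raises an
# ImportError pointing at the flax installation instructions.
def _demo_dummy_guard() -> None:
    try:
        snake_case__()  # any of the dummy classes defined above
    except ImportError as err:
        print(err)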
| 638 |
'''simple docstring'''
import enum
import os
from hashlib import sha256
from typing import Optional
from .. import config
from .logging import get_logger
logger = get_logger(__name__)
class VerificationMode(enum.Enum):
    """simple docstring"""
    ALL_CHECKS = """all_checks"""
    BASIC_CHECKS = """basic_checks"""
    NO_CHECKS = """no_checks"""
class ChecksumVerificationException(Exception):
    """Base exception for checksum verification errors."""
class UnexpectedDownloadedFile(ChecksumVerificationException):
    """Some downloaded files were not expected."""
class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    """Some files were supposed to be downloaded but were not."""
class NonMatchingChecksumError(ChecksumVerificationException):
    """The downloaded file checksum doesn't match the expected checksum."""
def verify_checksums(expected_checksums: Optional[dict], recorded_checksums: dict, verification_name=None) -> None:
    '''simple docstring'''
    if expected_checksums is None:
        logger.info('''Unable to verify checksums.''' )
        return
    if len(set(expected_checksums ) - set(recorded_checksums ) ) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums ) - set(recorded_checksums ) ) )
    if len(set(recorded_checksums ) - set(expected_checksums ) ) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums ) - set(expected_checksums ) ) )
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = ''' for ''' + verification_name if verification_name is not None else ''''''
    if len(bad_urls ) > 0:
        raise NonMatchingChecksumError(
            F'Checksums didn\'t match{for_verification_name}:\n'
            F'{bad_urls}\n'
            '''Set `verification_mode=\'no_checks\'` to skip checksums verification and ignore this error''' )
    logger.info('''All the checksums matched successfully''' + for_verification_name )
class SplitsVerificationException(Exception):
    """Base exception for splits verification errors."""
class UnexpectedSplits(SplitsVerificationException):
    """The expected splits of the downloaded file are missing."""
class ExpectedMoreSplits(SplitsVerificationException):
    """Some recorded splits are missing."""
class NonMatchingSplitsSizesError(SplitsVerificationException):
    """The split sizes don't match the expected split sizes."""
def verify_splits(expected_splits: Optional[dict], recorded_splits: dict) -> None:
    '''simple docstring'''
    if expected_splits is None:
        logger.info('''Unable to verify splits sizes.''' )
        return
    if len(set(expected_splits ) - set(recorded_splits ) ) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits ) - set(recorded_splits ) ) )
    if len(set(recorded_splits ) - set(expected_splits ) ) > 0:
        raise UnexpectedSplits(str(set(recorded_splits ) - set(expected_splits ) ) )
    bad_splits = [
        {'''expected''': expected_splits[name], '''recorded''': recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits ) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits ) )
    logger.info('''All the splits matched successfully.''' )
def get_size_checksum_dict(path: str, record_checksum: bool = True) -> dict:
    """Compute the file size and, optionally, its sha256 checksum."""
    if record_checksum:
        m = sha256()
        with open(path , '''rb''' ) as f:
            for chunk in iter(lambda: f.read(1 << 20 ) , B'''''' ):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path ), "checksum": checksum}
def is_small_dataset(dataset_size) -> bool:
    """Whether the dataset fits under config.IN_MEMORY_MAX_SIZE."""
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
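# --- editor's illustrative sketch (added, not part of the original row):
# record a file's size and sha256, then verify that a matching recording
# passes verify_checksums without raising.
def _demo_verify_checksums() -> None:
    with open("demo.bin", "wb" ) as f:
        f.write(b"hello" )
    expected = {"demo.bin": get_size_checksum_dict("demo.bin" )}
    recorded = {"demo.bin": get_size_checksum_dict("demo.bin" )}
    verify_checksums(expected , recorded )  # silent: sizes and checksums match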
| 638 | 1 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
logger = logging.get_logger(__name__)
UpperCAmelCase ={"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all MVP models at https://huggingface.co/models?filter=mvp
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json",
},
"added_tokens.json": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json",
},
"merges_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt",
},
"tokenizer_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"RUCAIBox/mvp": 1_024,
}
class MvpTokenizerFast( PreTrainedTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
    slow_tokenizer_class = MvpTokenizer
    def __init__( self ,vocab_file=None ,merges_file=None ,tokenizer_file=None ,errors="replace" ,bos_token="<s>" ,eos_token="</s>" ,sep_token="</s>" ,cls_token="<s>" ,unk_token="<unk>" ,pad_token="<pad>" ,mask_token="<mask>" ,add_prefix_space=False ,trim_offsets=True ,**kwargs ,) -> Any:
        super().__init__(
            vocab_file ,merges_file ,tokenizer_file=tokenizer_file ,errors=errors ,bos_token=bos_token ,eos_token=eos_token ,sep_token=sep_token ,cls_token=cls_token ,unk_token=unk_token ,pad_token=pad_token ,mask_token=mask_token ,add_prefix_space=add_prefix_space ,trim_offsets=trim_offsets ,**kwargs ,)
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("""add_prefix_space""" ,add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers ,pre_tok_state.pop("""type""" ) )
            pre_tok_state["""add_prefix_space"""] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = """post_processor"""
        tokenizer_component_instance = getattr(self.backend_tokenizer ,tokenizer_component ,None )
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["""sep"""] = tuple(state["""sep"""] )
            if "cls" in state:
                state["""cls"""] = tuple(state["""cls"""] )
            changes_to_apply = False
            if state.get("""add_prefix_space""" ,add_prefix_space ) != add_prefix_space:
                state["""add_prefix_space"""] = add_prefix_space
                changes_to_apply = True
            if state.get("""trim_offsets""" ,trim_offsets ) != trim_offsets:
                state["""trim_offsets"""] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors ,state.pop("""type""" ) )
                new_value = component_class(**state )
                setattr(self.backend_tokenizer ,tokenizer_component ,new_value )
    @property
    def mask_token( self ) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("""Using mask_token, but it is not set yet.""" )
            return None
        return str(self._mask_token )
    @mask_token.setter
    def mask_token( self ,value ) -> None:
        value = AddedToken(value ,lstrip=True ,rstrip=False ) if isinstance(value ,str ) else value
        self._mask_token = value
    def _batch_encode_plus( self ,*args ,**kwargs ) -> BatchEncoding:
        is_split_into_words = kwargs.get("""is_split_into_words""" ,False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
                """to use it with pretokenized inputs.""" )
        return super()._batch_encode_plus(*args ,**kwargs )
    def _encode_plus( self ,*args ,**kwargs ) -> BatchEncoding:
        is_split_into_words = kwargs.get("""is_split_into_words""" ,False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
                """to use it with pretokenized inputs.""" )
        return super()._encode_plus(*args ,**kwargs )
    def save_vocabulary( self ,save_directory ,filename_prefix = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory ,name=filename_prefix )
        return tuple(files )
    def build_inputs_with_special_tokens( self ,token_ids_a ,token_ids_b=None ) -> List[int]:
        output = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_b is None:
            return output
        return output + [self.eos_token_id] + token_ids_b + [self.eos_token_id]
    def create_token_type_ids_from_sequences( self ,token_ids_a ,token_ids_b = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
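# --- editor's illustrative sketch (added, not part of the original row):
# typical use of the fast tokenizer; the checkpoint name comes from the URL
# map above and loading it requires network access.
def _demo_mvp_tokenizer() -> None:
    tok = MvpTokenizerFast.from_pretrained("RUCAIBox/mvp" )
    enc = tok("Summarize: the quick brown fox." , return_tensors="pt" )
    print(enc["input_ids"].shape )  # (1, sequence_length)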
| 255 |
"""simple docstring"""
from __future__ import annotations
Path = list[tuple[int, int]]
grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]
delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right
class Node:
    '''simple docstring'''
    def __init__( self ,pos_x ,pos_y ,goal_x ,goal_y ,g_cost ,parent ,) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()
    def calculate_heuristic( self ) -> float:
        dy = abs(self.pos_x - self.goal_x )
        dx = abs(self.pos_y - self.goal_y )
        return dx + dy
    def __lt__( self ,other ) -> bool:
        return self.f_cost < other.f_cost
class GreedyBestFirst:
    '''simple docstring'''
    def __init__( self ,start ,goal ) -> None:
        self.start = Node(start[1] ,start[0] ,goal[1] ,goal[0] ,0 ,None )
        self.target = Node(goal[1] ,goal[0] ,goal[1] ,goal[0] ,9_9_9_9_9 ,None )
        self.open_nodes = [self.start]
        self.closed_nodes = []
        self.reached = False
    def search( self ) -> Path | None:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0 )
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node )
            self.closed_nodes.append(current_node )
            successors = self.get_successors(current_node )
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node )
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node ) )
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node )
                    else:
                        self.open_nodes.append(better_node )
        if not self.reached:
            return [self.start.pos]
        return None
    def get_successors( self ,parent ) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(grid ) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x ,pos_y ,self.target.pos_y ,self.target.pos_x ,parent.g_cost + 1 ,parent ,) )
        return successors
    def retrace_path( self ,node ) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x) )
            current_node = current_node.parent
        path.reverse()
        return path
if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)
    print("------")
    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
    if path:
        for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2
        for elem in grid:
            print(elem)
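# --- editor's illustrative sketch (added, not part of the original row): the
# f_cost is the Manhattan distance to the goal, and search() returns (y, x)
# pairs over the module-level grid.
def _demo_greedy_bf() -> None:
    node = Node(pos_x=0 , pos_y=0 , goal_x=4 , goal_y=3 , g_cost=0 , parent=None )
    assert node.f_cost == 7  # |0 - 4| + |0 - 3|
    route = GreedyBestFirst((0, 0) , (len(grid ) - 1, len(grid[0] ) - 1) ).search()
    print(route )  # starts at (0, 0) and, if the goal is reachable, ends at (6, 6)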
| 255 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@property
    def dummy_uncond_unet( self ) -> Tuple:
        '''simple docstring'''
        torch.manual_seed(0 )
        model = UNetaDModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
        return model
@property
    def dummy_vq_model( self ) -> Optional[int]:
        '''simple docstring'''
        torch.manual_seed(0 )
        model = VQModel(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=3 , )
        return model
@property
    def dummy_text_encoder( self ) -> List[str]:
        '''simple docstring'''
        torch.manual_seed(0 )
        config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
        return CLIPTextModel(config )
    def test_inference_uncond( self ) -> Optional[Any]:
        '''simple docstring'''
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model
        ldm = LDMPipeline(unet=unet , vqvae=vae , scheduler=scheduler )
        ldm.to(torch_device )
        ldm.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = ldm(generator=generator , num_inference_steps=2 , output_type="numpy" ).images
        generator = torch.manual_seed(0 )
        image_from_tuple = ldm(generator=generator , num_inference_steps=2 , output_type="numpy" , return_dict=False )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172] )
        tolerance = 1e-2 if torch_device != '''mps''' else 3e-2
        assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
    def test_inference_uncond( self ) -> Optional[int]:
        '''simple docstring'''
        ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256" )
        ldm.to(torch_device )
        ldm.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = ldm(generator=generator , num_inference_steps=5 , output_type="numpy" ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 2_56, 2_56, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447] )
        tolerance = 1e-2 if torch_device != '''mps''' else 3e-2
        assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
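# --- editor's illustrative sketch (added, not part of the original row): the
# slow test above boils down to this plain usage; it downloads the pretrained
# weights, so it needs network access and a few GB of disk.
def _demo_ldm_pipeline() -> None:
    ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256" )
    image = ldm(generator=torch.manual_seed(0 ) , num_inference_steps=5 , output_type="numpy" ).images[0]
    print(image.shape )  # (256, 256, 3)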
| 173 |
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()
def floats_list( shape , scale=1.0 , rng=None , name=None ):
    """simple docstring"""
    if rng is None:
        rng = global_rng
    values = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , min_seq_length=400 , max_seq_length=2000 , feature_size=10 , hop_length=160 , chunk_length=8 , padding_value=0.0 , sampling_rate=4000 , return_attention_mask=False , do_normalize=True , ) -> List[Any]:
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length
    def prepare_feat_extract_dict( self ) -> Optional[Any]:
"""simple docstring"""
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common( self , equal_length=False , numpify=False ) -> List[Any]:
"""simple docstring"""
def _flatten(A_ ):
return list(itertools.chain(*A_ ) )
if equal_length:
_lowercase: List[str] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_lowercase: Union[str, Any] = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_lowercase: str = [np.asarray(A_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest( SequenceFeatureExtractionTestMixin , unittest.TestCase ):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None
    def setUp( self ) -> Dict:
        """simple docstring"""
        self.feat_extract_tester = WhisperFeatureExtractionTester(self )
    def test_feat_extract_from_and_save_pretrained( self ) -> Tuple:
"""simple docstring"""
_lowercase: Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowercase: Any = feat_extract_first.save_pretrained(A_ )[0]
check_json_file_has_correct_format(A_ )
_lowercase: str = self.feature_extraction_class.from_pretrained(A_ )
_lowercase: Dict = feat_extract_first.to_dict()
_lowercase: List[str] = feat_extract_second.to_dict()
_lowercase: List[str] = feat_extract_first.mel_filters
_lowercase: str = feat_extract_second.mel_filters
self.assertTrue(np.allclose(A_ , A_ ) )
self.assertEqual(A_ , A_ )
    def test_feat_extract_to_json_file( self ) -> Optional[int]:
"""simple docstring"""
_lowercase: Dict = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowercase: Union[str, Any] = os.path.join(A_ , '''feat_extract.json''' )
feat_extract_first.to_json_file(A_ )
_lowercase: Tuple = self.feature_extraction_class.from_json_file(A_ )
_lowercase: int = feat_extract_first.to_dict()
_lowercase: Optional[int] = feat_extract_second.to_dict()
_lowercase: Tuple = feat_extract_first.mel_filters
_lowercase: Optional[int] = feat_extract_second.mel_filters
self.assertTrue(np.allclose(A_ , A_ ) )
self.assertEqual(A_ , A_ )
    def test_call( self ) -> Tuple:
"""simple docstring"""
_lowercase: Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_lowercase: Tuple = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_lowercase: List[str] = [np.asarray(A_ ) for speech_input in speech_inputs]
# Test feature size
_lowercase: Dict = feature_extractor(A_ , padding='''max_length''' , return_tensors='''np''' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
_lowercase: Dict = feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_features
_lowercase: Dict = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_features
self.assertTrue(np.allclose(A_ , A_ , atol=1E-3 ) )
# Test batched
_lowercase: Union[str, Any] = feature_extractor(A_ , return_tensors='''np''' ).input_features
_lowercase: List[Any] = feature_extractor(A_ , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(A_ , A_ ):
self.assertTrue(np.allclose(A_ , A_ , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
_lowercase: Optional[Any] = [floats_list((1, x) )[0] for x in (800, 800, 800)]
_lowercase: Any = np.asarray(A_ )
_lowercase: Tuple = feature_extractor(A_ , return_tensors='''np''' ).input_features
_lowercase: Dict = feature_extractor(A_ , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(A_ , A_ ):
self.assertTrue(np.allclose(A_ , A_ , atol=1E-3 ) )
# Test truncation required
_lowercase: List[str] = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
_lowercase: List[Any] = [np.asarray(A_ ) for speech_input in speech_inputs]
_lowercase: Any = [x[: feature_extractor.n_samples] for x in speech_inputs]
_lowercase: str = [np.asarray(A_ ) for speech_input in speech_inputs_truncated]
_lowercase: Optional[Any] = feature_extractor(A_ , return_tensors='''np''' ).input_features
_lowercase: List[str] = feature_extractor(A_ , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(A_ , A_ ):
self.assertTrue(np.allclose(A_ , A_ , atol=1E-3 ) )
    def test_double_precision_pad( self ) -> Union[str, Any]:
"""simple docstring"""
import torch
_lowercase: List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowercase: List[Any] = np.random.rand(100 , 32 ).astype(np.floataa )
_lowercase: Tuple = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_lowercase: Optional[Any] = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''np''' )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
_lowercase: int = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''pt''' )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
    def _load_datasamples( self , num_samples ) -> List[Any]:
        """simple docstring"""
        ds = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
        # automatic decoding with librispeech
        speech_samples = ds.sort('''id''' ).select(range(num_samples ) )[:num_samples]['''audio''']
        return [x["array"] for x in speech_samples]
    def test_integration( self ) -> Optional[int]:
"""simple docstring"""
_lowercase: List[str] = torch.tensor(
[
0.11_93, -0.09_46, -0.10_98, -0.01_96, 0.02_25, -0.06_90, -0.17_36, 0.09_51,
0.09_71, -0.08_17, -0.07_02, 0.01_62, 0.02_60, 0.00_17, -0.01_92, -0.16_78,
0.07_09, -0.18_67, -0.06_55, -0.02_74, -0.02_34, -0.18_84, -0.05_16, -0.05_54,
-0.02_74, -0.14_25, -0.14_23, 0.08_37, 0.03_77, -0.08_54
] )
# fmt: on
_lowercase: Optional[Any] = self._load_datasamples(1 )
_lowercase: Optional[int] = WhisperFeatureExtractor()
_lowercase: Dict = feature_extractor(A_ , return_tensors='''pt''' ).input_features
self.assertEqual(input_features.shape , (1, 80, 3000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , A_ , atol=1E-4 ) )
    def test_zero_mean_unit_variance_normalization_trunc_np_longest( self ) -> Union[str, Any]:
"""simple docstring"""
_lowercase: Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowercase: List[str] = self._load_datasamples(1 )[0]
_lowercase: Union[str, Any] = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_5535 # Rescale to [0, 65535] to show issue
_lowercase: Any = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=A_ )[0]
self.assertTrue(np.all(np.mean(A_ ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(A_ ) - 1 ) < 1E-3 ) )
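# --- editor's illustrative sketch (added, not part of the original row): the
# extractor pads/truncates to 30 s of audio and returns 80 log-mel bins over
# 3000 frames per sample, matching the (1, 80, 3000) assertion above.
def _demo_whisper_features() -> None:
    fe = WhisperFeatureExtractor()
    audio = np.zeros(16_000 , dtype=np.float32 )  # one second of silence at 16 kHz
    feats = fe(audio , sampling_rate=16_000 , return_tensors='''np''' ).input_features
    print(feats.shape )  # (1, 80, 3000)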
| 353 | 0 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class _UpperCamelCase ( DiffusionPipeline ):
'''simple docstring'''
    def __init__( self : Any , speech_model : WhisperForConditionalGeneration , speech_processor : WhisperProcessor , vae : AutoencoderKL , text_encoder : CLIPTextModel , tokenizer : CLIPTokenizer , unet : UNetaDConditionModel , scheduler : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , safety_checker : StableDiffusionSafetyChecker , feature_extractor : CLIPImageProcessor , ) -> List[Any]:
"""simple docstring"""
super().__init__()
if safety_checker is None:
logger.warning(
F"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
        self.register_modules(
            speech_model=speech_model , speech_processor=speech_processor , vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , feature_extractor=feature_extractor , )
    def enable_attention_slicing( self : str , slice_size : Optional[Union[str, int]] = "auto" ) -> None:
        """simple docstring"""
        if slice_size == "auto":
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size )
    def disable_attention_slicing( self : str ) -> None:
        """simple docstring"""
        self.enable_attention_slicing(None )
@torch.no_grad()
    def __call__( self : Optional[Any] , audio , sampling_rate=1_6000 , height : int = 512 , width : int = 512 , num_inference_steps : int = 50 , guidance_scale : float = 7.5 , negative_prompt : Optional[Union[str, List[str]]] = None , num_images_per_prompt : Optional[int] = 1 , eta : float = 0.0 , generator : Optional[torch.Generator] = None , latents : Optional[torch.FloatTensor] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , callback : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps : int = 1 , **kwargs , ) -> Dict:
"""simple docstring"""
        inputs = self.speech_processor.feature_extractor(
            audio , return_tensors="pt" , sampling_rate=sampling_rate ).input_features.to(self.device )
        predicted_ids = self.speech_model.generate(inputs , max_length=48_0000 )
        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids , skip_special_tokens=True , normalize=True )[
            0
        ]
        if isinstance(prompt , str ):
            batch_size = 1
        elif isinstance(prompt , list ):
            batch_size = len(prompt )
        else:
            raise ValueError(F"`prompt` has to be of type `str` or `list` but is {type(prompt )}" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"`height` and `width` have to be divisible by 8 but are {height} and {width}." )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(a , a ) or callback_steps <= 0)
):
raise ValueError(
F"`callback_steps` has to be a positive integer but is {callback_steps} of type"
F" {type(a )}." )
# get prompt text embeddings
SCREAMING_SNAKE_CASE : str = self.tokenizer(
a , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , )
SCREAMING_SNAKE_CASE : str = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
SCREAMING_SNAKE_CASE : str = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
F" {self.tokenizer.model_max_length} tokens: {removed_text}" )
SCREAMING_SNAKE_CASE : int = text_input_ids[:, : self.tokenizer.model_max_length]
SCREAMING_SNAKE_CASE : Optional[int] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
SCREAMING_SNAKE_CASE : Tuple = text_embeddings.shape
SCREAMING_SNAKE_CASE : Tuple = text_embeddings.repeat(1 , a , 1 )
SCREAMING_SNAKE_CASE : Optional[int] = text_embeddings.view(bs_embed * num_images_per_prompt , a , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
SCREAMING_SNAKE_CASE : int = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE : List[str]
if negative_prompt is None:
SCREAMING_SNAKE_CASE : Any = [""] * batch_size
elif type(a ) is not type(a ):
raise TypeError(
F"`negative_prompt` should be the same type to `prompt`, but got {type(a )} !="
F" {type(a )}." )
elif isinstance(a , a ):
SCREAMING_SNAKE_CASE : Union[str, Any] = [negative_prompt]
elif batch_size != len(a ):
raise ValueError(
F"`negative_prompt`: {negative_prompt} has batch size {len(a )}, but `prompt`:"
F" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
" the batch size of `prompt`." )
else:
SCREAMING_SNAKE_CASE : str = negative_prompt
SCREAMING_SNAKE_CASE : Tuple = text_input_ids.shape[-1]
SCREAMING_SNAKE_CASE : Tuple = self.tokenizer(
a , padding="max_length" , max_length=a , truncation=a , return_tensors="pt" , )
SCREAMING_SNAKE_CASE : List[Any] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
SCREAMING_SNAKE_CASE : str = uncond_embeddings.shape[1]
SCREAMING_SNAKE_CASE : List[str] = uncond_embeddings.repeat(1 , a , 1 )
SCREAMING_SNAKE_CASE : Optional[Any] = uncond_embeddings.view(batch_size * num_images_per_prompt , a , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
SCREAMING_SNAKE_CASE : List[str] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
SCREAMING_SNAKE_CASE : Tuple = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
SCREAMING_SNAKE_CASE : List[Any] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
SCREAMING_SNAKE_CASE : Dict = torch.randn(a , generator=a , device="cpu" , dtype=a ).to(
self.device )
else:
SCREAMING_SNAKE_CASE : List[Any] = torch.randn(a , generator=a , device=self.device , dtype=a )
else:
if latents.shape != latents_shape:
raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" )
SCREAMING_SNAKE_CASE : List[Any] = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(a )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
SCREAMING_SNAKE_CASE : Dict = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
SCREAMING_SNAKE_CASE : Any = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
SCREAMING_SNAKE_CASE : List[str] = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
SCREAMING_SNAKE_CASE : Union[str, Any] = {}
if accepts_eta:
SCREAMING_SNAKE_CASE : Optional[int] = eta
for i, t in enumerate(self.progress_bar(a ) ):
# expand the latents if we are doing classifier free guidance
SCREAMING_SNAKE_CASE : int = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
SCREAMING_SNAKE_CASE : Dict = self.scheduler.scale_model_input(a , a )
# predict the noise residual
SCREAMING_SNAKE_CASE : Optional[int] = self.unet(a , a , encoder_hidden_states=a ).sample
# perform guidance
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE : Any = noise_pred.chunk(2 )
SCREAMING_SNAKE_CASE : List[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
SCREAMING_SNAKE_CASE : List[str] = self.scheduler.step(a , a , a , **a ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(a , a , a )
SCREAMING_SNAKE_CASE : Any = 1 / 0.1_8215 * latents
SCREAMING_SNAKE_CASE : Optional[Any] = self.vae.decode(a ).sample
SCREAMING_SNAKE_CASE : str = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
SCREAMING_SNAKE_CASE : Optional[int] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE : List[Any] = self.numpy_to_pil(a )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=a , nsfw_content_detected=a )
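# --- editor's illustrative note (added, not part of the original row): the
# guidance step above is standard classifier-free guidance; with scale 7.5 the
# text-conditioned direction is amplified relative to the unconditional one.
def _demo_cfg_step() -> None:
    noise_pred_uncond = torch.tensor([0.1] )
    noise_pred_text = torch.tensor([0.3] )
    guided = noise_pred_uncond + 7.5 * (noise_pred_text - noise_pred_uncond)
    print(guided )  # tensor([1.6000])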
| 713 |
import math
def real_power(apparent_power: float, power_factor: float) -> float:
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * power_factor
def reactive_power(apparent_power: float, power_factor: float) -> float:
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * math.sqrt(1 - power_factor**2)
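# --- editor's worked example (added, not part of the original row): a 100 VA
# load at power factor 0.9 draws 90 W of real power and about 43.59 VAR of
# reactive power (100 * sqrt(1 - 0.81)).
def _demo_power() -> None:
    assert real_power(100, 0.9) == 90.0
    print(reactive_power(100, 0.9))  # 43.5889894...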
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 193 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCAmelCase ( PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase):
    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False
    def get_dummy_components( self ) -> Dict:
        '''simple docstring'''
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
        scheduler = DDIMScheduler(
            beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        components = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
    def get_dummy_inputs( self , device , seed=0 ) -> int:
        '''simple docstring'''
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
'''prompt''': '''.''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 1.0,
'''sag_scale''': 1.0,
'''output_type''': '''numpy''',
}
return inputs
    def test_inference_batch_single_identical( self ) -> Any:
        '''simple docstring'''
        super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class lowerCAmelCase ( unittest.TestCase):
    def tearDown( self ) -> str:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
__snake_case = StableDiffusionSAGPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' )
__snake_case = sag_pipe.to(__SCREAMING_SNAKE_CASE )
sag_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__snake_case = '''.'''
__snake_case = torch.manual_seed(0 )
__snake_case = sag_pipe(
[prompt] , generator=__SCREAMING_SNAKE_CASE , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' )
__snake_case = output.images
__snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__snake_case = np.array([0.1_568, 0.1_738, 0.1_695, 0.1_693, 0.1_507, 0.1_705, 0.1_547, 0.1_751, 0.1_949] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
__snake_case = StableDiffusionSAGPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
__snake_case = sag_pipe.to(__SCREAMING_SNAKE_CASE )
sag_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__snake_case = '''.'''
__snake_case = torch.manual_seed(0 )
__snake_case = sag_pipe(
[prompt] , generator=__SCREAMING_SNAKE_CASE , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' )
__snake_case = output.images
__snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__snake_case = np.array([0.3_459, 0.2_876, 0.2_537, 0.3_002, 0.2_671, 0.2_160, 0.3_026, 0.2_262, 0.2_371] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
__snake_case = StableDiffusionSAGPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
__snake_case = sag_pipe.to(__SCREAMING_SNAKE_CASE )
sag_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__snake_case = '''.'''
__snake_case = torch.manual_seed(0 )
__snake_case = sag_pipe(
[prompt] , width=768 , height=512 , generator=__SCREAMING_SNAKE_CASE , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' , )
__snake_case = output.images
assert image.shape == (1, 512, 768, 3)
| 24 |
'''simple docstring'''
from bisect import bisect
from itertools import accumulate
def frac_knapsack(vl, wt, w, n) -> float:
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )
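# --- editor's worked example (added, not part of the original row): with
# value/weight ratios 6, 5 and 4 and capacity 50, the greedy picks the first
# two items whole plus 20/30 of the third: 60 + 100 + 120 * 20 / 30 = 240.0
def _demo_frac_knapsack() -> None:
    assert frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3) == 240.0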
if __name__ == "__main__":
import doctest
doctest.testmod()
| 288 | 0 |
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
logger = logging.get_logger(__name__)
def load_pytorch_checkpoint_in_flax_state_dict( flax_model , pytorch_checkpoint_path , is_sharded , allow_missing_keys=False ) -> int:
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            'Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see'
            ' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'
            ' instructions.' )
        raise
    if not is_sharded:
        pt_path = os.path.abspath(pytorch_checkpoint_path )
        logger.info(F"""Loading PyTorch weights from {pt_path}""" )
        pt_state_dict = torch.load(pt_path , map_location='cpu' )
        logger.info(F"""PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.""" )
        flax_state_dict = convert_pytorch_state_dict_to_flax(pt_state_dict , flax_model )
    else:
        # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
        flax_state_dict = convert_pytorch_sharded_state_dict_to_flax(pytorch_checkpoint_path , flax_model )
    return flax_state_dict
def rename_key_and_reshape_tensor( pt_tuple_key , pt_tensor , random_flax_state_dict , model_prefix , ) -> (Tuple[str], np.ndarray):
    """Rename PT weight names to corresponding Flax weight names and reshape the tensor if necessary."""
    def is_key_or_prefix_key_in_dict(key ) -> bool:
        return len(set(random_flax_state_dict ) & {key, (model_prefix,) + key} ) > 0
    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('scale',)
    if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key ):
        return renamed_pt_tuple_key, pt_tensor
    # batch norm layer mean
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('mean',)
    if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(pt_tuple_key ):
        return renamed_pt_tuple_key, pt_tensor
    # batch norm layer var
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('var',)
    if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(pt_tuple_key ):
        return renamed_pt_tuple_key, pt_tensor
    # embedding
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('embedding',)
    if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key ):
        return renamed_pt_tuple_key, pt_tensor
    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('kernel',)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(pt_tuple_key ):
        pt_tensor = pt_tensor.transpose(2 , 3 , 1 , 0 )
        return renamed_pt_tuple_key, pt_tensor
    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('kernel',)
    if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(pt_tuple_key ):
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('weight',)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('bias',)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor
    # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
    name = None
    if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
        name = pt_tuple_key[-2] + '_g'
    elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
        name = pt_tuple_key[-2] + '_v'
    if name is not None:
        renamed_pt_tuple_key = pt_tuple_key[:-3] + (name,)
        return renamed_pt_tuple_key, pt_tensor
    return pt_tuple_key, pt_tensor
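# --- editor's illustrative sketch (added, not part of the original row): the
# conv branch above converts PyTorch's (out, in, kH, kW) kernel layout into
# Flax's (kH, kW, in, out) layout with a single transpose.
def _demo_conv_kernel_layout() -> None:
    w_pt = np.zeros((8, 3, 5, 5) )           # (out_channels, in_channels, kH, kW)
    w_flax = w_pt.transpose(2 , 3 , 1 , 0 )  # same permutation as in the code above
    print(w_flax.shape )                     # (5, 5, 3, 8)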
def convert_pytorch_state_dict_to_flax( pt_state_dict , flax_model ) -> Any:
# convert pytorch tensor to numpy
snake_case__ : Any = {k: v.numpy() for k, v in pt_state_dict.items()}
snake_case__ : Optional[Any] = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
snake_case__ : Union[str, Any] = flax_model.params['params']
else:
snake_case__ : Union[str, Any] = flax_model.params
snake_case__ : List[str] = flatten_dict(A__ )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
snake_case__ : Optional[int] = flatten_dict(flax_model.params['batch_stats'] )
random_flax_state_dict.update(A__ )
snake_case__ : int = {}
snake_case__ : Dict = (model_prefix not in flax_model_params) and (
model_prefix in {k.split('.' )[0] for k in pt_state_dict.keys()}
)
snake_case__ : Optional[int] = (model_prefix in flax_model_params) and (
model_prefix not in {k.split('.' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
snake_case__ : Any = tuple(pt_key.split('.' ) )
# remove base model prefix if necessary
snake_case__ : List[str] = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
snake_case__ : int = pt_tuple_key[1:]
# Correctly rename weight parameters
snake_case__ , snake_case__ : int = rename_key_and_reshape_tensor(
A__ , A__ , A__ , A__ )
# add model prefix if necessary
snake_case__ : str = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
snake_case__ : Any = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
F"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
snake_case__ : Optional[int] = jnp.asarray(A__ )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(A__ , A__ )
continue
# also add unexpected weight so that warning is thrown
snake_case__ : Union[str, Any] = jnp.asarray(A__ )
else:
# also add unexpected weight so that warning is thrown
snake_case__ : Dict = jnp.asarray(A__ )
return unflatten_dict(A__ )
def convert_pytorch_sharded_state_dict_to_flax( shard_filenames , flax_model ) -> Optional[Any]:
import torch
# Load the index
snake_case__ : int = {}
for shard_file in shard_filenames:
# load using msgpack utils
snake_case__ : Optional[int] = torch.load(A__ )
snake_case__ : int = {k: v.numpy() for k, v in pt_state_dict.items()}
snake_case__ : List[str] = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
snake_case__ : Union[str, Any] = flax_model.params['params']
snake_case__ : Any = flatten_dict(A__ )
random_flax_state_dict.update(flatten_dict(flax_model.params['batch_stats'] ) )
else:
snake_case__ : Optional[int] = flax_model.params
snake_case__ : str = flatten_dict(A__ )
snake_case__ : Tuple = (model_prefix not in flax_model_params) and (
model_prefix in {k.split('.' )[0] for k in pt_state_dict.keys()}
)
snake_case__ : Optional[Any] = (model_prefix in flax_model_params) and (
model_prefix not in {k.split('.' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
snake_case__ : Optional[int] = tuple(pt_key.split('.' ) )
# remove base model prefix if necessary
snake_case__ : List[Any] = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
snake_case__ : str = pt_tuple_key[1:]
# Correctly rename weight parameters
snake_case__ , snake_case__ : Tuple = rename_key_and_reshape_tensor(
A__ , A__ , A__ , A__ )
# add model prefix if necessary
snake_case__ : Optional[int] = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
snake_case__ : int = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
F"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
snake_case__ : List[Any] = jnp.asarray(A__ )
continue
if "var" in flax_key[-1]:
snake_case__ : Optional[Any] = jnp.asarray(A__ )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(A__ , A__ )
continue
# also add unexpected weight so that warning is thrown
snake_case__ : str = jnp.asarray(A__ )
else:
# also add unexpected weight so that warning is thrown
snake_case__ : Tuple = jnp.asarray(A__ )
return unflatten_dict(A__ )
def load_flax_checkpoint_in_pytorch_model( model , flax_checkpoint_path ) -> Dict:
    flax_checkpoint_path = os.path.abspath(flax_checkpoint_path )
    logger.info(F"""Loading Flax weights from {flax_checkpoint_path}""" )
    # import correct flax class
    flax_cls = getattr(transformers , 'Flax' + model.__class__.__name__ )
    # load flax weight dict
    with open(flax_checkpoint_path , 'rb' ) as state_f:
        try:
            flax_state_dict = from_bytes(flax_cls , state_f.read() )
        except UnpicklingError:
            raise EnvironmentError(F"""Unable to convert {flax_checkpoint_path} to Flax deserializable object. """ )
    return load_flax_weights_in_pytorch_model(model , flax_state_dict )
def load_flax_weights_in_pytorch_model( pt_model , flax_state ) -> Optional[int]:
try:
import torch # noqa: F401
except ImportError:
logger.error(
'Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see'
' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'
' instructions.' )
raise
# check if we have bf16 weights
snake_case__ : Any = flatten_dict(jax.tree_util.tree_map(lambda A__ : x.dtype == jnp.bfloataa , A__ ) ).values()
if any(A__ ):
# convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
'Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '
'before loading those in PyTorch model.' )
snake_case__ : Optional[Any] = jax.tree_util.tree_map(
lambda A__ : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , A__ )
snake_case__ : List[Any] = flatten_dict(A__ )
snake_case__ : Any = pt_model.state_dict()
snake_case__ : Optional[Any] = (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split('.' )[0] for k in pt_model_dict.keys()}
)
snake_case__ : Tuple = (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split('.' )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
snake_case__ : List[str] = []
snake_case__ : Optional[int] = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
snake_case__ : str = flax_key_tuple[0] == pt_model.base_model_prefix
snake_case__ : Dict = '.'.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
snake_case__ : str = flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
snake_case__ : int = (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(A__ ) not in pt_model_dict:
# conv layer
snake_case__ : List[Any] = flax_key_tuple[:-1] + ('weight',)
snake_case__ : Optional[int] = jnp.transpose(A__ , (3, 2, 0, 1) )
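            # Flax stores conv kernels as (kernel_h, kernel_w, in_channels, out_channels);
            # PyTorch expects (out_channels, in_channels, kernel_h, kernel_w), hence the
            # (3, 2, 0, 1) permutation, e.g. shape (3, 3, 16, 32) -> (32, 16, 3, 3).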
elif flax_key_tuple[-1] == "kernel" and ".".join(A__ ) not in pt_model_dict:
# linear layer
snake_case__ : Union[str, Any] = flax_key_tuple[:-1] + ('weight',)
snake_case__ : Optional[int] = flax_tensor.T
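            # Flax Dense kernels are (in_features, out_features); the transpose yields the
            # (out_features, in_features) layout used by torch.nn.Linear.weight.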
elif flax_key_tuple[-1] in ["scale", "embedding"]:
snake_case__ : Optional[Any] = flax_key_tuple[:-1] + ('weight',)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
snake_case__ : Dict = flax_key_tuple[:-1] + ('running_mean',)
elif "var" in flax_key_tuple[-1]:
snake_case__ : List[Any] = flax_key_tuple[:-1] + ('running_var',)
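        # Flax keeps BatchNorm statistics in a separate `batch_stats` collection, whose
        # `mean`/`var` leaves map onto PyTorch's `running_mean`/`running_var` buffers.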
if "batch_stats" in flax_state:
snake_case__ : Union[str, Any] = '.'.join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
snake_case__ : Tuple = '.'.join(A__ )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
snake_case__ : Tuple = {}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
snake_case__ : List[Any] = key.split('.' )
snake_case__ : Tuple = None
if key_components[-3::2] == ["parametrizations", "original0"]:
snake_case__ : Optional[int] = key_components[-2] + '_g'
elif key_components[-3::2] == ["parametrizations", "original1"]:
snake_case__ : List[Any] = key_components[-2] + '_v'
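            # With torch's parametrized weight norm (torch.nn.utils.parametrizations.weight_norm),
            # `...parametrizations.weight.original0` stores the magnitude (legacy `weight_g`) and
            # `original1` the direction (legacy `weight_v`).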
if name is not None:
snake_case__ : List[str] = key_components[:-3] + [name]
snake_case__ : Optional[int] = '.'.join(A__ )
snake_case__ : Optional[int] = key
if flax_key in special_pt_names:
snake_case__ : str = special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F"""Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected """
F"""to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
else:
# add weight to pytorch dict
snake_case__ : int = np.asarray(A__ ) if not isinstance(A__ , np.ndarray ) else flax_tensor
snake_case__ : Optional[Any] = torch.from_numpy(A__ )
# remove from missing keys
missing_keys.remove(A__ )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(A__ )
pt_model.load_state_dict(A__ )
# re-transform missing_keys to list
snake_case__ : Union[str, Any] = list(A__ )
if len(A__ ) > 0:
logger.warning(
'Some weights of the Flax model were not used when initializing the PyTorch model'
F""" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"""
F""" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"""
' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'
F""" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"""
' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'
' FlaxBertForSequenceClassification model).' )
else:
logger.warning(F"""All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n""" )
if len(A__ ) > 0:
logger.warning(
F"""Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"""
F""" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"""
' use it for predictions and inference.' )
else:
logger.warning(
F"""All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n"""
'If your task is similar to the task the model of the checkpoint was trained on, '
F"""you can already use {pt_model.__class__.__name__} for predictions without further training.""" )
return pt_model
| 699 | # tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def UpperCamelCase__ ( A__ ) -> Optional[Any]:
from diffusers.utils.testing_utils import pytest_addoption_shared
pytest_addoption_shared(A__ )
def UpperCamelCase__ ( terminalreporter ) -> Optional[Any]:
    from diffusers.utils.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption('--make-reports' )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
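# Assuming the shared diffusers/transformers test helpers behave as in the transformers repo,
# an invocation like `pytest --make-reports=run_name tests/` (illustrative usage) makes the hook
# above write per-run report files (failures, durations, ...) in addition to the terminal summary.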
| 699 | 1 |
'''simple docstring'''
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class snake_case__ ( UpperCamelCase__ ):
A__ = 0
A__ = False
A__ = 3.0
class snake_case__ ( unittest.TestCase ):
def A_ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'a': 2} )
        self.assertDictEqual(MockClass(a=2 , b=True ).to_kwargs() , {'a': 2, 'b': True} )
        self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {'a': 2, 'c': 2.25} )
@require_cuda
def A_ ( self : Any ) -> Dict:
'''simple docstring'''
        scaler_handler = GradScalerKwargs(init_scale=1024 , growth_factor=2 )
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision='fp16' , kwargs_handlers=[scaler_handler] )
        print(accelerator.use_fp16 )
        scaler = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 1024.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 2000 )
        self.assertEqual(scaler._enabled , True )
@require_multi_gpu
def A_ ( self : int ) -> str:
'''simple docstring'''
__snake_case : Union[str, Any] = ["torchrun", f'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
execute_subprocess_async(lowerCAmelCase__ , env=os.environ.copy() )
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=1_5, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(1_0_0, 2_0_0)
    model = accelerator.prepare(model)
    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1_0_2_4 * 1_0_2_4)
if observed_bucket_cap_map != 1_5:
error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
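    # Note: torch's DistributedDataParallel exposes its bucket size as `bucket_bytes_cap` (in bytes),
    # which is why the check above divides by 1024 * 1024 to recover the `bucket_cap_mb` value.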
| 286 |
def _A ( arr : list[int] , required_sum : int ) -> bool:
    """
    Check whether some subset of `arr` sums to `required_sum`, using the classic
    O(len(arr) * required_sum) dynamic-programming table.

    >>> _A([2, 4, 6, 8], 5)
    False
    >>> _A([2, 4, 6, 8], 14)
    True
    """
    arr_len = len(arr )
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
    # a sum of zero(0) can always be formed by not taking any element, hence True
    for i in range(arr_len + 1 ):
        subset[i][0] = True
    # a non-zero sum cannot be formed from the empty prefix, hence False
    for i in range(1 , required_sum + 1 ):
        subset[0][i] = False
    for i in range(1 , arr_len + 1 ):
        for j in range(1 , required_sum + 1 ):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
    return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 563 | 0 |
'''simple docstring'''
def __lowerCAmelCase (bin_string: str ) -> str:
    """
    Convert a binary string to its octal representation.

    >>> __lowerCAmelCase("101010")
    '52'
    """
    if not all(char in "01" for char in bin_string ):
        raise ValueError("Non-binary value was passed to the function" )
    if not bin_string:
        raise ValueError("Empty string was passed to the function" )
    oct_string = ""
    # left-pad so the length is a multiple of 3 (one octal digit per 3 bits)
    while len(bin_string ) % 3 != 0:
        bin_string = "0" + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string ) )
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group ):
            oct_val += int(2 ** (2 - index) * int(val ) )
        oct_string += str(oct_val )
    return oct_string
if __name__ == "__main__":
from doctest import testmod
testmod()
| 40 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class lowerCAmelCase__ :
def __init__( self : Union[str, Any] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : List[Any]=2 , lowerCamelCase__ : List[Any]=True , lowerCamelCase__ : Union[str, Any]=False , lowerCamelCase__ : List[Any]=10 , lowerCamelCase__ : Optional[Any]=3 , lowerCamelCase__ : Tuple=32 * 8 , lowerCamelCase__ : int=32 * 8 , lowerCamelCase__ : Dict=4 , lowerCamelCase__ : Any=64 , ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = parent
_UpperCAmelCase : Tuple = batch_size
_UpperCAmelCase : Dict = is_training
_UpperCAmelCase : Optional[Any] = use_auxiliary_loss
_UpperCAmelCase : Dict = num_queries
_UpperCAmelCase : Dict = num_channels
_UpperCAmelCase : Union[str, Any] = min_size
_UpperCAmelCase : Optional[int] = max_size
_UpperCAmelCase : str = num_labels
_UpperCAmelCase : Optional[int] = hidden_dim
_UpperCAmelCase : Any = hidden_dim
def lowerCAmelCase__ ( self : str ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=lowerCamelCase__ ) > 0.5
).float()
_UpperCAmelCase : int = (torch.rand((self.batch_size, self.num_labels) , device=lowerCamelCase__ ) > 0.5).long()
_UpperCAmelCase : Dict = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def lowerCAmelCase__ ( self : List[str] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Dict = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
_UpperCAmelCase : List[str] = self.num_queries
_UpperCAmelCase : Any = self.num_labels
_UpperCAmelCase : Union[str, Any] = [1, 1, 1, 1]
_UpperCAmelCase : Any = self.num_channels
_UpperCAmelCase : int = 64
_UpperCAmelCase : int = 1_28
_UpperCAmelCase : int = self.hidden_dim
_UpperCAmelCase : List[Any] = self.hidden_dim
_UpperCAmelCase : Any = self.hidden_dim
return config
def lowerCAmelCase__ ( self : Any ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : str = self.prepare_config_and_inputs()
_UpperCAmelCase : Optional[Any] = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
return config, inputs_dict
def lowerCAmelCase__ ( self : int , lowerCamelCase__ : List[str] , lowerCamelCase__ : str ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : str = output.encoder_hidden_states
_UpperCAmelCase : List[str] = output.pixel_decoder_hidden_states
_UpperCAmelCase : Optional[Any] = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(lowerCamelCase__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(lowerCamelCase__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(lowerCamelCase__ ) , config.decoder_layers )
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : List[str] , lowerCamelCase__ : int , lowerCamelCase__ : List[str] , lowerCamelCase__ : Dict=False ) ->str:
'''simple docstring'''
with torch.no_grad():
_UpperCAmelCase : List[Any] = MaskaFormerModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_UpperCAmelCase : int = model(pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ )
_UpperCAmelCase : List[str] = model(lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(lowerCamelCase__ , lowerCamelCase__ )
def lowerCAmelCase__ ( self : List[Any] , lowerCamelCase__ : str , lowerCamelCase__ : List[str] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : int , lowerCamelCase__ : Tuple ) ->str:
'''simple docstring'''
_UpperCAmelCase : str = MaskaFormerForUniversalSegmentation(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
def comm_check_on_output(lowerCamelCase__ : Dict ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
_UpperCAmelCase : Union[str, Any] = model(pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ )
_UpperCAmelCase : int = model(lowerCamelCase__ )
comm_check_on_output(lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = model(
pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ )
comm_check_on_output(lowerCamelCase__ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
lowerCAmelCase : Optional[int] = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
lowerCAmelCase : str = {"feature-extraction": MaskaFormerModel} if is_torch_available() else {}
lowerCAmelCase : Optional[Any] = False
lowerCAmelCase : Any = False
lowerCAmelCase : List[Any] = False
lowerCAmelCase : Any = False
def lowerCAmelCase__ ( self : Optional[int] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : int = MaskaFormerModelTester(self )
_UpperCAmelCase : int = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ )
def lowerCAmelCase__ ( self : int ) ->Any:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self : Dict ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(lowerCamelCase__ , **lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*lowerCamelCase__ )
@unittest.skip(reason="Mask2Former does not use inputs_embeds" )
def lowerCAmelCase__ ( self : Tuple ) ->List[str]:
'''simple docstring'''
pass
@unittest.skip(reason="Mask2Former does not have a get_input_embeddings method" )
def lowerCAmelCase__ ( self : str ) ->List[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="Mask2Former is not a generative model" )
def lowerCAmelCase__ ( self : Optional[Any] ) ->Optional[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="Mask2Former does not use token embeddings" )
def lowerCAmelCase__ ( self : Optional[int] ) ->Any:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def lowerCAmelCase__ ( self : Dict ) ->str:
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->str:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self : List[Any] ) ->Any:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : List[str] = model_class(lowerCamelCase__ )
_UpperCAmelCase : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase : Tuple = [*signature.parameters.keys()]
_UpperCAmelCase : Dict = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
@slow
def lowerCAmelCase__ ( self : Optional[int] ) ->Any:
'''simple docstring'''
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
_UpperCAmelCase : str = MaskaFormerModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def lowerCAmelCase__ ( self : Optional[Any] ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : str = (self.model_tester.min_size,) * 2
_UpperCAmelCase : Optional[Any] = {
"pixel_values": torch.randn((2, 3, *size) , device=lowerCamelCase__ ),
"mask_labels": torch.randn((2, 10, *size) , device=lowerCamelCase__ ),
"class_labels": torch.zeros(2 , 10 , device=lowerCamelCase__ ).long(),
}
_UpperCAmelCase : int = self.model_tester.get_config()
_UpperCAmelCase : str = MaskaFormerForUniversalSegmentation(lowerCamelCase__ ).to(lowerCamelCase__ )
_UpperCAmelCase : str = model(**lowerCamelCase__ )
self.assertTrue(outputs.loss is not None )
def lowerCAmelCase__ ( self : Dict ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(lowerCamelCase__ , **lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
def lowerCAmelCase__ ( self : Dict ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : int = model_class(lowerCamelCase__ ).to(lowerCamelCase__ )
_UpperCAmelCase : List[str] = model(**lowerCamelCase__ , output_attentions=lowerCamelCase__ )
self.assertTrue(outputs.attentions is not None )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->str:
'''simple docstring'''
if not self.model_tester.is_training:
return
_UpperCAmelCase : Optional[Any] = self.all_model_classes[1]
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
_UpperCAmelCase : Optional[int] = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.train()
_UpperCAmelCase : Optional[int] = model(lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ ).loss
loss.backward()
def lowerCAmelCase__ ( self : Dict ) ->Any:
'''simple docstring'''
_UpperCAmelCase : str = self.all_model_classes[1]
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
_UpperCAmelCase : Union[str, Any] = True
_UpperCAmelCase : Tuple = True
_UpperCAmelCase : List[Any] = model_class(lowerCamelCase__ ).to(lowerCamelCase__ )
model.train()
_UpperCAmelCase : Any = model(lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_UpperCAmelCase : Dict = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
_UpperCAmelCase : Optional[Any] = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_UpperCAmelCase : Union[str, Any] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=lowerCamelCase__ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
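        # retain_grad() on these non-leaf tensors is what makes the .grad checks above possible;
        # by default, intermediate activations do not keep their gradients after backward().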
lowerCamelCase__ = 1e-4
def __lowerCAmelCase ():
_UpperCAmelCase : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_vision
@slow
class lowerCAmelCase__ ( unittest.TestCase ):
@cached_property
def lowerCAmelCase__ ( self : str ) ->str:
'''simple docstring'''
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def lowerCAmelCase__ ( self : Tuple ) ->List[str]:
'''simple docstring'''
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def lowerCAmelCase__ ( self : Any ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ )
_UpperCAmelCase : int = self.default_image_processor
_UpperCAmelCase : Optional[Any] = prepare_img()
_UpperCAmelCase : str = image_processor(lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ )
_UpperCAmelCase : Dict = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCamelCase__ , (1, 3, 3_84, 3_84) )
with torch.no_grad():
_UpperCAmelCase : str = model(**lowerCamelCase__ )
_UpperCAmelCase : List[str] = torch.tensor(
[[-0.2_7_9_0, -1.0_7_1_7, -1.1_6_6_8], [-0.5_1_2_8, -0.3_1_2_8, -0.4_9_8_7], [-0.5_8_3_2, 0.1_9_7_1, -0.0_1_9_7]] ).to(lowerCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
_UpperCAmelCase : List[Any] = torch.tensor(
[[0.8_9_7_3, 1.1_8_4_7, 1.1_7_7_6], [1.1_9_3_4, 1.5_0_4_0, 1.5_1_2_8], [1.1_1_5_3, 1.4_4_8_6, 1.4_9_5_1]] ).to(lowerCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
_UpperCAmelCase : Tuple = torch.tensor(
[[2.1_1_5_2, 1.7_0_0_0, -0.8_6_0_3], [1.5_8_0_8, 1.8_0_0_4, -0.9_3_5_3], [1.6_0_4_3, 1.7_4_9_5, -0.5_9_9_9]] ).to(lowerCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
def lowerCAmelCase__ ( self : List[Any] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ ).eval()
_UpperCAmelCase : List[Any] = self.default_image_processor
_UpperCAmelCase : Union[str, Any] = prepare_img()
_UpperCAmelCase : Optional[int] = image_processor(lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ )
_UpperCAmelCase : List[Any] = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCamelCase__ , (1, 3, 3_84, 3_84) )
with torch.no_grad():
_UpperCAmelCase : List[Any] = model(**lowerCamelCase__ )
# masks_queries_logits
_UpperCAmelCase : List[Any] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
_UpperCAmelCase : List[str] = [
[-8.7_8_3_9, -9.0_0_5_6, -8.8_1_2_1],
[-7.4_1_0_4, -7.0_3_1_3, -6.5_4_0_1],
[-6.6_1_0_5, -6.3_4_2_7, -6.4_6_7_5],
]
_UpperCAmelCase : List[Any] = torch.tensor(lowerCamelCase__ ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
# class_queries_logits
_UpperCAmelCase : Dict = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
_UpperCAmelCase : str = torch.tensor(
[
[1.8_3_2_4, -8.0_8_3_5, -4.1_9_2_2],
[0.8_4_5_0, -9.0_0_5_0, -3.6_0_5_3],
[0.3_0_4_5, -7.7_2_9_3, -3.0_2_7_5],
] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
def lowerCAmelCase__ ( self : List[Any] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ ).eval()
_UpperCAmelCase : Tuple = self.default_image_processor
_UpperCAmelCase : List[str] = image_processor(
[np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] , segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.floataa ), np.zeros((3_84, 3_84) ).astype(np.floataa )] , return_tensors="pt" , )
_UpperCAmelCase : str = inputs["pixel_values"].to(lowerCamelCase__ )
_UpperCAmelCase : List[str] = [el.to(lowerCamelCase__ ) for el in inputs["mask_labels"]]
_UpperCAmelCase : List[str] = [el.to(lowerCamelCase__ ) for el in inputs["class_labels"]]
with torch.no_grad():
_UpperCAmelCase : int = model(**lowerCamelCase__ )
self.assertTrue(outputs.loss is not None )
| 40 | 1 |
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__A = logging.get_logger(__name__)
class _lowerCAmelCase ( a ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = ["""input_features"""]
def __init__( self , __UpperCAmelCase=8_0 , __UpperCAmelCase=1_6_0_0_0 , __UpperCAmelCase=1_6_0 , __UpperCAmelCase=3_0 , __UpperCAmelCase=4_0_0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=False , **__UpperCAmelCase , ):
'''simple docstring'''
super().__init__(
feature_size=__UpperCAmelCase , sampling_rate=__UpperCAmelCase , padding_value=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , **__UpperCAmelCase , )
lowerCAmelCase__ :Any = n_fft
lowerCAmelCase__ :List[Any] = hop_length
lowerCAmelCase__ :Tuple = chunk_length
lowerCAmelCase__ :Tuple = chunk_length * sampling_rate
lowerCAmelCase__ :Tuple = self.n_samples // hop_length
lowerCAmelCase__ :List[Any] = sampling_rate
lowerCAmelCase__ :Optional[int] = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__UpperCAmelCase , min_frequency=0.0 , max_frequency=8000.0 , sampling_rate=__UpperCAmelCase , norm='slaney' , mel_scale='slaney' , )
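        # The bank above projects the 1 + n_fft // 2 one-sided FFT bins onto `feature_size`
        # mel bands spanning 0 Hz to 8 kHz (Slaney scale, as in Whisper's reference code).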
def snake_case ( self , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :List[Any] = spectrogram(
__UpperCAmelCase , window_function(self.n_fft , 'hann' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel='log10' , )
lowerCAmelCase__ :Dict = log_spec[:, :-1]
lowerCAmelCase__ :List[Any] = np.maximum(__UpperCAmelCase , log_spec.max() - 8.0 )
lowerCAmelCase__ :Optional[Any] = (log_spec + 4.0) / 4.0
return log_spec
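    # The clamp to (log_spec.max() - 8.0) and the (log_spec + 4.0) / 4.0 rescaling above
    # compress the log10-mel values into roughly [-1, 1], matching Whisper's preprocessing.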
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def snake_case ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 0.0 ):
'''simple docstring'''
if attention_mask is not None:
lowerCAmelCase__ :Tuple = np.array(__UpperCAmelCase , np.intaa )
lowerCAmelCase__ :Any = []
for vector, length in zip(__UpperCAmelCase , attention_mask.sum(-1 ) ):
lowerCAmelCase__ :Any = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
if length < normed_slice.shape[0]:
lowerCAmelCase__ :List[Any] = padding_value
normed_input_values.append(__UpperCAmelCase )
else:
lowerCAmelCase__ :Optional[int] = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
return normed_input_values
def __call__( self , __UpperCAmelCase , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = "max_length" , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , **__UpperCAmelCase , ):
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
F" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
F" was sampled with {self.sampling_rate} and not {sampling_rate}." )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
lowerCAmelCase__ :Optional[int] = isinstance(__UpperCAmelCase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"Only mono-channel audio is supported for input to {self}" )
lowerCAmelCase__ :Optional[int] = is_batched_numpy or (
isinstance(__UpperCAmelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
lowerCAmelCase__ :Dict = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(__UpperCAmelCase , np.ndarray ):
lowerCAmelCase__ :Union[str, Any] = np.asarray(__UpperCAmelCase , dtype=np.floataa )
elif isinstance(__UpperCAmelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowerCAmelCase__ :List[str] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowerCAmelCase__ :Any = [np.asarray([raw_speech] ).T]
lowerCAmelCase__ :List[str] = BatchFeature({'input_features': raw_speech} )
# convert into correct format for padding
lowerCAmelCase__ :str = self.pad(
__UpperCAmelCase , padding=__UpperCAmelCase , max_length=max_length if max_length else self.n_samples , truncation=__UpperCAmelCase , pad_to_multiple_of=__UpperCAmelCase , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
lowerCAmelCase__ :Union[str, Any] = self.zero_mean_unit_var_norm(
padded_inputs['input_features'] , attention_mask=padded_inputs['attention_mask'] , padding_value=self.padding_value , )
lowerCAmelCase__ :Tuple = np.stack(padded_inputs['input_features'] , axis=0 )
# make sure list is in array format
lowerCAmelCase__ :Tuple = padded_inputs.get('input_features' ).transpose(2 , 0 , 1 )
lowerCAmelCase__ :Optional[Any] = [self._np_extract_fbank_features(__UpperCAmelCase ) for waveform in input_features[0]]
if isinstance(input_features[0] , __UpperCAmelCase ):
lowerCAmelCase__ :Any = [np.asarray(__UpperCAmelCase , dtype=np.floataa ) for feature in input_features]
else:
lowerCAmelCase__ :Union[str, Any] = input_features
if return_attention_mask:
            # rescale the attention mask from the sample level (480_000 samples for 30 s at 16 kHz) to the feature level (3000 frames)
lowerCAmelCase__ :Union[str, Any] = padded_inputs['attention_mask'][:, :: self.hop_length]
if return_tensors is not None:
lowerCAmelCase__ :List[Any] = padded_inputs.convert_to_tensors(__UpperCAmelCase )
return padded_inputs
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Union[str, Any] = copy.deepcopy(self.__dict__ )
lowerCAmelCase__ :List[Any] = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output
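        # `mel_filters` is dropped from the serialized dict because it is fully determined by
        # feature_size, sampling_rate and n_fft, and is rebuilt in __init__.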
| 93 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def __A (_SCREAMING_SNAKE_CASE ) ->Any:
"""simple docstring"""
lowerCAmelCase__ :Union[str, Any] = 384
if "tiny" in model_name:
lowerCAmelCase__ :List[Any] = [3, 3, 9, 3]
lowerCAmelCase__ :Tuple = [96, 192, 384, 768]
if "small" in model_name:
lowerCAmelCase__ :Union[str, Any] = [3, 3, 27, 3]
lowerCAmelCase__ :Any = [96, 192, 384, 768]
if "base" in model_name:
lowerCAmelCase__ :Dict = [3, 3, 27, 3]
lowerCAmelCase__ :Any = [128, 256, 512, 1024]
lowerCAmelCase__ :Union[str, Any] = 512
if "large" in model_name:
lowerCAmelCase__ :int = [3, 3, 27, 3]
lowerCAmelCase__ :Any = [192, 384, 768, 1536]
lowerCAmelCase__ :Optional[Any] = 768
if "xlarge" in model_name:
lowerCAmelCase__ :Optional[Any] = [3, 3, 27, 3]
lowerCAmelCase__ :str = [256, 512, 1024, 2048]
lowerCAmelCase__ :Union[str, Any] = 1024
# set label information
lowerCAmelCase__ :Tuple = 150
lowerCAmelCase__ :List[Any] = 'huggingface/label-files'
lowerCAmelCase__ :Tuple = 'ade20k-id2label.json'
lowerCAmelCase__ :Tuple = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) )
lowerCAmelCase__ :Dict = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
lowerCAmelCase__ :int = {v: k for k, v in idalabel.items()}
lowerCAmelCase__ :List[str] = ConvNextConfig(
depths=_SCREAMING_SNAKE_CASE , hidden_sizes=_SCREAMING_SNAKE_CASE , out_features=['stage1', 'stage2', 'stage3', 'stage4'] )
lowerCAmelCase__ :Union[str, Any] = UperNetConfig(
backbone_config=_SCREAMING_SNAKE_CASE , auxiliary_in_channels=_SCREAMING_SNAKE_CASE , num_labels=_SCREAMING_SNAKE_CASE , idalabel=_SCREAMING_SNAKE_CASE , labelaid=_SCREAMING_SNAKE_CASE , )
return config
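# For example, the name "upernet-convnext-tiny" maps to depths [3, 3, 9, 3], hidden sizes
# [96, 192, 384, 768], and a 150-class ADE20k segmentation head per the branches above.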
def __A (_SCREAMING_SNAKE_CASE ) ->str:
"""simple docstring"""
lowerCAmelCase__ :str = []
# fmt: off
# stem
rename_keys.append(('backbone.downsample_layers.0.0.weight', 'backbone.embeddings.patch_embeddings.weight') )
rename_keys.append(('backbone.downsample_layers.0.0.bias', 'backbone.embeddings.patch_embeddings.bias') )
rename_keys.append(('backbone.downsample_layers.0.1.weight', 'backbone.embeddings.layernorm.weight') )
rename_keys.append(('backbone.downsample_layers.0.1.bias', 'backbone.embeddings.layernorm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"backbone.stages.{i}.{j}.gamma", F"backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter") )
rename_keys.append((F"backbone.stages.{i}.{j}.depthwise_conv.weight", F"backbone.encoder.stages.{i}.layers.{j}.dwconv.weight") )
rename_keys.append((F"backbone.stages.{i}.{j}.depthwise_conv.bias", F"backbone.encoder.stages.{i}.layers.{j}.dwconv.bias") )
rename_keys.append((F"backbone.stages.{i}.{j}.norm.weight", F"backbone.encoder.stages.{i}.layers.{j}.layernorm.weight") )
rename_keys.append((F"backbone.stages.{i}.{j}.norm.bias", F"backbone.encoder.stages.{i}.layers.{j}.layernorm.bias") )
rename_keys.append((F"backbone.stages.{i}.{j}.pointwise_conv1.weight", F"backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight") )
rename_keys.append((F"backbone.stages.{i}.{j}.pointwise_conv1.bias", F"backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias") )
rename_keys.append((F"backbone.stages.{i}.{j}.pointwise_conv2.weight", F"backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight") )
rename_keys.append((F"backbone.stages.{i}.{j}.pointwise_conv2.bias", F"backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias") )
if i > 0:
rename_keys.append((F"backbone.downsample_layers.{i}.0.weight", F"backbone.encoder.stages.{i}.downsampling_layer.0.weight") )
rename_keys.append((F"backbone.downsample_layers.{i}.0.bias", F"backbone.encoder.stages.{i}.downsampling_layer.0.bias") )
rename_keys.append((F"backbone.downsample_layers.{i}.1.weight", F"backbone.encoder.stages.{i}.downsampling_layer.1.weight") )
rename_keys.append((F"backbone.downsample_layers.{i}.1.bias", F"backbone.encoder.stages.{i}.downsampling_layer.1.bias") )
rename_keys.append((F"backbone.norm{i}.weight", F"backbone.hidden_states_norms.stage{i+1}.weight") )
rename_keys.append((F"backbone.norm{i}.bias", F"backbone.hidden_states_norms.stage{i+1}.bias") )
# decode head
rename_keys.extend(
[
('decode_head.conv_seg.weight', 'decode_head.classifier.weight'),
('decode_head.conv_seg.bias', 'decode_head.classifier.bias'),
('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'),
('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'),
] )
# fmt: on
return rename_keys
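# e.g. ("backbone.stages.0.0.gamma", "backbone.encoder.stages.0.layers.0.layer_scale_parameter")
# is one (old, new) pair this produces, mapping mmsegmentation names to transformers names.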
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->str:
"""simple docstring"""
lowerCAmelCase__ :List[str] = dct.pop(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :Tuple = val
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->int:
"""simple docstring"""
lowerCAmelCase__ :Dict = {
'upernet-convnext-tiny': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth',
'upernet-convnext-small': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth',
'upernet-convnext-base': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth',
'upernet-convnext-large': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth',
'upernet-convnext-xlarge': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth',
}
lowerCAmelCase__ :List[Any] = model_name_to_url[model_name]
lowerCAmelCase__ :Optional[Any] = torch.hub.load_state_dict_from_url(_SCREAMING_SNAKE_CASE , map_location='cpu' )['state_dict']
lowerCAmelCase__ :List[Any] = get_upernet_config(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :Dict = UperNetForSemanticSegmentation(_SCREAMING_SNAKE_CASE )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        if "bn" in key:
            key = key.replace('bn' , 'batch_norm' )
        state_dict[key] = val
# rename keys
lowerCAmelCase__ :Optional[Any] = create_rename_keys(_SCREAMING_SNAKE_CASE )
for src, dest in rename_keys:
rename_key(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
model.load_state_dict(_SCREAMING_SNAKE_CASE )
# verify on image
lowerCAmelCase__ :str = 'https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'
lowerCAmelCase__ :Optional[Any] = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw ).convert('RGB' )
lowerCAmelCase__ :Tuple = SegformerImageProcessor()
lowerCAmelCase__ :List[Any] = processor(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
with torch.no_grad():
lowerCAmelCase__ :Optional[Any] = model(_SCREAMING_SNAKE_CASE )
if model_name == "upernet-convnext-tiny":
lowerCAmelCase__ :Optional[Any] = torch.tensor(
[[-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.7_7_4_6, -8.7_7_4_6, -8.6_1_3_0]] )
elif model_name == "upernet-convnext-small":
lowerCAmelCase__ :Union[str, Any] = torch.tensor(
[[-8.8_2_3_6, -8.8_2_3_6, -8.6_7_7_1], [-8.8_2_3_6, -8.8_2_3_6, -8.6_7_7_1], [-8.7_6_3_8, -8.7_6_3_8, -8.6_2_4_0]] )
elif model_name == "upernet-convnext-base":
lowerCAmelCase__ :Dict = torch.tensor(
[[-8.8_5_5_8, -8.8_5_5_8, -8.6_9_0_5], [-8.8_5_5_8, -8.8_5_5_8, -8.6_9_0_5], [-8.7_6_6_9, -8.7_6_6_9, -8.6_0_2_1]] )
elif model_name == "upernet-convnext-large":
lowerCAmelCase__ :List[str] = torch.tensor(
[[-8.6_6_6_0, -8.6_6_6_0, -8.6_2_1_0], [-8.6_6_6_0, -8.6_6_6_0, -8.6_2_1_0], [-8.6_3_1_0, -8.6_3_1_0, -8.5_9_6_4]] )
elif model_name == "upernet-convnext-xlarge":
lowerCAmelCase__ :Optional[Any] = torch.tensor(
[[-8.4_9_8_0, -8.4_9_8_0, -8.3_9_7_7], [-8.4_9_8_0, -8.4_9_8_0, -8.3_9_7_7], [-8.4_3_7_9, -8.4_3_7_9, -8.3_4_1_2]] )
print('Logits:' , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
print(F"Saving processor to {pytorch_dump_folder_path}" )
processor.save_pretrained(_SCREAMING_SNAKE_CASE )
if push_to_hub:
print(F"Pushing model and processor for {model_name} to hub" )
model.push_to_hub(F"openmmlab/{model_name}" )
processor.push_to_hub(F"openmmlab/{model_name}" )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""upernet-convnext-tiny""",
type=str,
choices=[F'''upernet-convnext-{size}''' for size in ["""tiny""", """small""", """base""", """large""", """xlarge"""]],
help="""Name of the ConvNext UperNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
__A = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 93 | 1 |
'''simple docstring'''
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def _lowerCamelCase ( lowercase : Union[str, Any] , lowercase : Any ) -> List[str]:
# Load checkpoint
_a = torch.load(lowercase , map_location="cpu" )
_a = chkpt["model"]
# We have the base model one level deeper than the original XLM repository
_a = {}
for k, v in state_dict.items():
if "pred_layer" in k:
_a = v
else:
_a = v
_a = chkpt["params"]
    _a = {n: v for n, v in config.items() if not isinstance(v , (torch.FloatTensor, numpy.ndarray) )}
_a = chkpt["dico_word2id"]
_a = {s + "</w>" if s.find("@@" ) == -1 and i > 13 else s.replace("@@" , "" ): i for s, i in vocab.items()}
# Save pytorch-model
_a = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
_a = pytorch_dump_folder_path + "/" + CONFIG_NAME
_a = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]
print(F'Save PyTorch model to {pytorch_weights_dump_path}' )
torch.save(lowercase , lowercase )
print(F'Save configuration file to {pytorch_config_dump_path}' )
with open(lowercase , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(lowercase , indent=2 ) + "\n" )
print(F'Save vocab file to {pytorch_config_dump_path}' )
with open(lowercase , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(lowercase , indent=2 ) + "\n" )
if __name__ == "__main__":
lowerCAmelCase_ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--xlm_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
lowerCAmelCase_ : Tuple = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
| 521 |
'''simple docstring'''
import math
import sys
def _lowerCamelCase ( lowercase : str ) -> str:
_a = ""
try:
with open(lowercase , "rb" ) as binary_file:
_a = binary_file.read()
for dat in data:
_a = F'{dat:08b}'
result += curr_byte
return result
except OSError:
print("File not accessible" )
sys.exit()
def _lowerCamelCase ( lowercase : str ) -> str:
_a = {"0": "0", "1": "1"}
_a , _a = "", ""
_a = len(lowercase )
for i in range(len(lowercase ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
_a = lexicon[curr_string]
result += last_match_id
_a = last_match_id + "0"
if math.loga(lowercase ).is_integer():
_a = {}
for curr_key in list(lowercase ):
_a = lexicon.pop(lowercase )
_a = new_lex
_a = last_match_id + "1"
index += 1
_a = ""
return result
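# The loop above is the LZW-style decoding pass: `curr_string` grows bit by bit until it matches
# a lexicon entry, the decoded match is emitted, and the lexicon is extended with new
# "0"/"1"-suffixed codes (its keys are re-prefixed whenever the code count crosses a power of two).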
def _lowerCamelCase ( lowercase : str , lowercase : str ) -> None:
_a = 8
try:
with open(lowercase , "wb" ) as opened_file:
_a = [
to_write[i : i + byte_length]
for i in range(0 , len(lowercase ) , lowercase )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append("10000000" )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array[:-1]:
opened_file.write(int(lowercase , 2 ).to_bytes(1 , byteorder="big" ) )
except OSError:
print("File not accessible" )
sys.exit()
def _lowerCamelCase ( lowercase : str ) -> str:
_a = 0
for letter in data_bits:
if letter == "1":
break
counter += 1
_a = data_bits[counter:]
_a = data_bits[counter + 1 :]
return data_bits
def _lowerCamelCase ( lowercase : str , lowercase : str ) -> None:
_a = read_file_binary(lowercase )
_a = remove_prefix(lowercase )
_a = decompress_data(lowercase )
write_file_binary(lowercase , lowercase )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 521 | 1 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
SCREAMING_SNAKE_CASE__ : Union[str, Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : List[Any] = {
"""microsoft/deberta-v2-xlarge""": """https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json""",
"""microsoft/deberta-v2-xxlarge""": """https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json""",
"""microsoft/deberta-v2-xlarge-mnli""": (
"""https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"""
),
"""microsoft/deberta-v2-xxlarge-mnli""": (
"""https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"""
),
}
class UpperCAmelCase_ ( __lowerCamelCase ):
__lowerCamelCase = 'deberta-v2'
def __init__( self , _lowerCAmelCase=128100 , _lowerCAmelCase=1536 , _lowerCAmelCase=24 , _lowerCAmelCase=24 , _lowerCAmelCase=6144 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=512 , _lowerCAmelCase=0 , _lowerCAmelCase=0.0_2 , _lowerCAmelCase=1e-7 , _lowerCAmelCase=False , _lowerCAmelCase=-1 , _lowerCAmelCase=0 , _lowerCAmelCase=True , _lowerCAmelCase=None , _lowerCAmelCase=0 , _lowerCAmelCase="gelu" , **_lowerCAmelCase , ):
super().__init__(**_lowerCAmelCase )
UpperCAmelCase__ : List[str] = hidden_size
UpperCAmelCase__ : List[str] = num_hidden_layers
UpperCAmelCase__ : Any = num_attention_heads
UpperCAmelCase__ : List[str] = intermediate_size
UpperCAmelCase__ : Optional[Any] = hidden_act
UpperCAmelCase__ : Any = hidden_dropout_prob
UpperCAmelCase__ : Tuple = attention_probs_dropout_prob
UpperCAmelCase__ : Optional[Any] = max_position_embeddings
UpperCAmelCase__ : Tuple = type_vocab_size
UpperCAmelCase__ : Any = initializer_range
UpperCAmelCase__ : List[Any] = relative_attention
UpperCAmelCase__ : Tuple = max_relative_positions
UpperCAmelCase__ : List[str] = pad_token_id
UpperCAmelCase__ : Any = position_biased_input
# Backwards compatibility
if type(_lowerCAmelCase ) == str:
UpperCAmelCase__ : Tuple = [x.strip() for x in pos_att_type.lower().split("""|""" )]
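            # e.g. a legacy string value "p2c|c2p" becomes ["p2c", "c2p"], the
            # disentangled-attention types DeBERTa supports.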
UpperCAmelCase__ : Dict = pos_att_type
UpperCAmelCase__ : Tuple = vocab_size
UpperCAmelCase__ : Dict = layer_norm_eps
UpperCAmelCase__ : Any = kwargs.get("""pooler_hidden_size""" , _lowerCAmelCase )
UpperCAmelCase__ : Optional[int] = pooler_dropout
UpperCAmelCase__ : int = pooler_hidden_act
class UpperCAmelCase_ ( __lowerCamelCase ):
@property
def __UpperCAmelCase ( self ):
if self.task == "multiple-choice":
UpperCAmelCase__ : Union[str, Any] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
UpperCAmelCase__ : Tuple = {0: """batch""", 1: """sequence"""}
if self._config.type_vocab_size > 0:
return OrderedDict(
[("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis)] )
else:
return OrderedDict([("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis)] )
@property
def __UpperCAmelCase ( self ):
return 12
def __UpperCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase = -1 , _lowerCAmelCase = -1 , _lowerCAmelCase = -1 , _lowerCAmelCase = False , _lowerCAmelCase = None , _lowerCAmelCase = 3 , _lowerCAmelCase = 40 , _lowerCAmelCase = 40 , _lowerCAmelCase = None , ):
UpperCAmelCase__ : int = super().generate_dummy_inputs(preprocessor=_lowerCAmelCase , framework=_lowerCAmelCase )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
| 79 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__SCREAMING_SNAKE_CASE = {
'configuration_mctct': ['MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MCTCTConfig'],
'feature_extraction_mctct': ['MCTCTFeatureExtractor'],
'processing_mctct': ['MCTCTProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE = [
'MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MCTCTForCTC',
'MCTCTModel',
'MCTCTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
__SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 357 | 0 |
'''simple docstring'''
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class _lowercase ( unittest.TestCase ):
a = MODEL_FOR_MASKED_LM_MAPPING
a = TF_MODEL_FOR_MASKED_LM_MAPPING
def lowerCamelCase_ ( self: Tuple ):
super().tearDown()
        # clean up as much GPU memory occupied by PyTorch as possible
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def lowerCamelCase_ ( self: Optional[int] ):
lowerCamelCase__ : Dict = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""tf""" )
lowerCamelCase__ : Tuple = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=6 ) , [
{"""sequence""": """My name is grouped""", """score""": 2.1e-05, """token""": 38_015, """token_str""": """ grouped"""},
{"""sequence""": """My name is accuser""", """score""": 2.1e-05, """token""": 25_506, """token_str""": """ accuser"""},
] , )
lowerCamelCase__ : int = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=6 ) , [
{
"""sequence""": """The largest city in France is grouped""",
"""score""": 2.1e-05,
"""token""": 38_015,
"""token_str""": """ grouped""",
},
{
"""sequence""": """The largest city in France is accuser""",
"""score""": 2.1e-05,
"""token""": 25_506,
"""token_str""": """ accuser""",
},
] , )
lowerCamelCase__ : Union[str, Any] = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=6 ) , [
{"""sequence""": """My name is Clara""", """score""": 2e-05, """token""": 13_606, """token_str""": """ Clara"""},
{"""sequence""": """My name is Patrick""", """score""": 2e-05, """token""": 3_499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Te""", """score""": 1.9e-05, """token""": 2_941, """token_str""": """ Te"""},
] , )
@require_torch
def lowerCamelCase_ ( self: int ):
lowerCamelCase__ : Optional[int] = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""pt""" )
lowerCamelCase__ : int = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=6 ) , [
{"""sequence""": """My name is Maul""", """score""": 2.2e-05, """token""": 35_676, """token_str""": """ Maul"""},
{"""sequence""": """My name isELS""", """score""": 2.2e-05, """token""": 16_416, """token_str""": """ELS"""},
] , )
lowerCamelCase__ : int = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=6 ) , [
{
"""sequence""": """The largest city in France is Maul""",
"""score""": 2.2e-05,
"""token""": 35_676,
"""token_str""": """ Maul""",
},
{"""sequence""": """The largest city in France isELS""", """score""": 2.2e-05, """token""": 16_416, """token_str""": """ELS"""},
] , )
lowerCamelCase__ : Any = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=6 ) , [
{"""sequence""": """My name is Patrick""", """score""": 2.1e-05, """token""": 3_499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Te""", """score""": 2e-05, """token""": 2_941, """token_str""": """ Te"""},
{"""sequence""": """My name is Clara""", """score""": 2e-05, """token""": 13_606, """token_str""": """ Clara"""},
] , )
lowerCamelCase__ : List[Any] = unmasker("""My name is <mask> <mask>""" , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=6 ) , [
[
{
"""score""": 2.2e-05,
"""token""": 35_676,
"""token_str""": """ Maul""",
"""sequence""": """<s>My name is Maul<mask></s>""",
},
{"""score""": 2.2e-05, """token""": 16_416, """token_str""": """ELS""", """sequence""": """<s>My name isELS<mask></s>"""},
],
[
{
"""score""": 2.2e-05,
"""token""": 35_676,
"""token_str""": """ Maul""",
"""sequence""": """<s>My name is<mask> Maul</s>""",
},
{"""score""": 2.2e-05, """token""": 16_416, """token_str""": """ELS""", """sequence""": """<s>My name is<mask>ELS</s>"""},
],
] , )
@require_torch_gpu
def lowerCamelCase_ ( self: Dict ):
lowerCamelCase__ : Optional[int] = pipeline("""fill-mask""" , model="""hf-internal-testing/tiny-random-distilbert""" , device=0 , framework="""pt""" )
# convert model to fp16
pipe.model.half()
lowerCamelCase__ : int = pipe("""Paris is the [MASK] of France.""" )
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor got cast back to float32
# for postprocessing.
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
@slow
@require_torch
def lowerCamelCase_ ( self: Optional[int] ):
lowerCamelCase__ : Optional[int] = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""pt""" )
self.run_large_test(UpperCamelCase__ )
@slow
@require_tf
def lowerCamelCase_ ( self: List[str] ):
lowerCamelCase__ : str = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""tf""" )
self.run_large_test(UpperCamelCase__ )
def lowerCamelCase_ ( self: Any , UpperCamelCase__: List[Any] ):
lowerCamelCase__ : Optional[Any] = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(UpperCamelCase__ ) , [
{"""sequence""": """My name is John""", """score""": 0.008, """token""": 610, """token_str""": """ John"""},
{"""sequence""": """My name is Chris""", """score""": 0.007, """token""": 1_573, """token_str""": """ Chris"""},
] , )
lowerCamelCase__ : List[str] = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(UpperCamelCase__ ) , [
{
"""sequence""": """The largest city in France is Paris""",
"""score""": 0.251,
"""token""": 2_201,
"""token_str""": """ Paris""",
},
{
"""sequence""": """The largest city in France is Lyon""",
"""score""": 0.214,
"""token""": 12_790,
"""token_str""": """ Lyon""",
},
] , )
lowerCamelCase__ : List[str] = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(UpperCamelCase__ ) , [
{"""sequence""": """My name is Patrick""", """score""": 0.005, """token""": 3_499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Clara""", """score""": 0.000, """token""": 13_606, """token_str""": """ Clara"""},
{"""sequence""": """My name is Te""", """score""": 0.000, """token""": 2_941, """token_str""": """ Te"""},
] , )
@require_torch
def lowerCamelCase_ ( self: List[str] ):
lowerCamelCase__ : Dict = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""pt""" )
lowerCamelCase__ : List[str] = None
lowerCamelCase__ : Optional[int] = None
self.run_pipeline_test(UpperCamelCase__ , [] )
@require_tf
def lowerCamelCase_ ( self: int ):
lowerCamelCase__ : Optional[int] = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""tf""" )
lowerCamelCase__ : Optional[int] = None
lowerCamelCase__ : Optional[Any] = None
self.run_pipeline_test(UpperCamelCase__ , [] )
def lowerCamelCase_ ( self: int , UpperCamelCase__: int , UpperCamelCase__: str , UpperCamelCase__: Tuple ):
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest("""The provided tokenizer has no mask token, (probably reformer or wav2vec2)""" )
lowerCamelCase__ : List[Any] = FillMaskPipeline(model=UpperCamelCase__ , tokenizer=UpperCamelCase__ )
lowerCamelCase__ : Union[str, Any] = [
F'''This is another {tokenizer.mask_token} test''',
]
return fill_masker, examples
def lowerCamelCase_ ( self: int , UpperCamelCase__: str , UpperCamelCase__: List[str] ):
lowerCamelCase__ : str = fill_masker.tokenizer
lowerCamelCase__ : Tuple = fill_masker.model
lowerCamelCase__ : Union[str, Any] = fill_masker(
F'''This is a {tokenizer.mask_token}''' , )
self.assertEqual(
UpperCamelCase__ , [
{"""sequence""": ANY(UpperCamelCase__ ), """score""": ANY(UpperCamelCase__ ), """token""": ANY(UpperCamelCase__ ), """token_str""": ANY(UpperCamelCase__ )},
{"""sequence""": ANY(UpperCamelCase__ ), """score""": ANY(UpperCamelCase__ ), """token""": ANY(UpperCamelCase__ ), """token_str""": ANY(UpperCamelCase__ )},
{"""sequence""": ANY(UpperCamelCase__ ), """score""": ANY(UpperCamelCase__ ), """token""": ANY(UpperCamelCase__ ), """token_str""": ANY(UpperCamelCase__ )},
{"""sequence""": ANY(UpperCamelCase__ ), """score""": ANY(UpperCamelCase__ ), """token""": ANY(UpperCamelCase__ ), """token_str""": ANY(UpperCamelCase__ )},
{"""sequence""": ANY(UpperCamelCase__ ), """score""": ANY(UpperCamelCase__ ), """token""": ANY(UpperCamelCase__ ), """token_str""": ANY(UpperCamelCase__ )},
] , )
lowerCamelCase__ : List[Any] = fill_masker([F'''This is a {tokenizer.mask_token}'''] )
self.assertEqual(
UpperCamelCase__ , [
{"""sequence""": ANY(UpperCamelCase__ ), """score""": ANY(UpperCamelCase__ ), """token""": ANY(UpperCamelCase__ ), """token_str""": ANY(UpperCamelCase__ )},
{"""sequence""": ANY(UpperCamelCase__ ), """score""": ANY(UpperCamelCase__ ), """token""": ANY(UpperCamelCase__ ), """token_str""": ANY(UpperCamelCase__ )},
{"""sequence""": ANY(UpperCamelCase__ ), """score""": ANY(UpperCamelCase__ ), """token""": ANY(UpperCamelCase__ ), """token_str""": ANY(UpperCamelCase__ )},
{"""sequence""": ANY(UpperCamelCase__ ), """score""": ANY(UpperCamelCase__ ), """token""": ANY(UpperCamelCase__ ), """token_str""": ANY(UpperCamelCase__ )},
{"""sequence""": ANY(UpperCamelCase__ ), """score""": ANY(UpperCamelCase__ ), """token""": ANY(UpperCamelCase__ ), """token_str""": ANY(UpperCamelCase__ )},
] , )
lowerCamelCase__ : Optional[int] = fill_masker([F'''This is a {tokenizer.mask_token}''', F'''Another {tokenizer.mask_token} great test.'''] )
self.assertEqual(
UpperCamelCase__ , [
[
{"""sequence""": ANY(UpperCamelCase__ ), """score""": ANY(UpperCamelCase__ ), """token""": ANY(UpperCamelCase__ ), """token_str""": ANY(UpperCamelCase__ )},
{"""sequence""": ANY(UpperCamelCase__ ), """score""": ANY(UpperCamelCase__ ), """token""": ANY(UpperCamelCase__ ), """token_str""": ANY(UpperCamelCase__ )},
{"""sequence""": ANY(UpperCamelCase__ ), """score""": ANY(UpperCamelCase__ ), """token""": ANY(UpperCamelCase__ ), """token_str""": ANY(UpperCamelCase__ )},
{"""sequence""": ANY(UpperCamelCase__ ), """score""": ANY(UpperCamelCase__ ), """token""": ANY(UpperCamelCase__ ), """token_str""": ANY(UpperCamelCase__ )},
{"""sequence""": ANY(UpperCamelCase__ ), """score""": ANY(UpperCamelCase__ ), """token""": ANY(UpperCamelCase__ ), """token_str""": ANY(UpperCamelCase__ )},
],
[
{"""sequence""": ANY(UpperCamelCase__ ), """score""": ANY(UpperCamelCase__ ), """token""": ANY(UpperCamelCase__ ), """token_str""": ANY(UpperCamelCase__ )},
{"""sequence""": ANY(UpperCamelCase__ ), """score""": ANY(UpperCamelCase__ ), """token""": ANY(UpperCamelCase__ ), """token_str""": ANY(UpperCamelCase__ )},
{"""sequence""": ANY(UpperCamelCase__ ), """score""": ANY(UpperCamelCase__ ), """token""": ANY(UpperCamelCase__ ), """token_str""": ANY(UpperCamelCase__ )},
{"""sequence""": ANY(UpperCamelCase__ ), """score""": ANY(UpperCamelCase__ ), """token""": ANY(UpperCamelCase__ ), """token_str""": ANY(UpperCamelCase__ )},
{"""sequence""": ANY(UpperCamelCase__ ), """score""": ANY(UpperCamelCase__ ), """token""": ANY(UpperCamelCase__ ), """token_str""": ANY(UpperCamelCase__ )},
],
] , )
with self.assertRaises(UpperCamelCase__ ):
fill_masker([None] )
# Text without a mask_token is not supported
with self.assertRaises(UpperCamelCase__ ):
fill_masker("""This is""" )
self.run_test_top_k(UpperCamelCase__ , UpperCamelCase__ )
self.run_test_targets(UpperCamelCase__ , UpperCamelCase__ )
self.run_test_top_k_targets(UpperCamelCase__ , UpperCamelCase__ )
self.fill_mask_with_duplicate_targets_and_top_k(UpperCamelCase__ , UpperCamelCase__ )
self.fill_mask_with_multiple_masks(UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase__: int , UpperCamelCase__: str ):
lowerCamelCase__ : Tuple = tokenizer.get_vocab()
lowerCamelCase__ : Any = sorted(vocab.keys() )[:2]
# Pipeline argument
lowerCamelCase__ : int = FillMaskPipeline(model=UpperCamelCase__ , tokenizer=UpperCamelCase__ , targets=UpperCamelCase__ )
lowerCamelCase__ : str = fill_masker(F'''This is a {tokenizer.mask_token}''' )
self.assertEqual(
UpperCamelCase__ , [
{"""sequence""": ANY(UpperCamelCase__ ), """score""": ANY(UpperCamelCase__ ), """token""": ANY(UpperCamelCase__ ), """token_str""": ANY(UpperCamelCase__ )},
{"""sequence""": ANY(UpperCamelCase__ ), """score""": ANY(UpperCamelCase__ ), """token""": ANY(UpperCamelCase__ ), """token_str""": ANY(UpperCamelCase__ )},
] , )
lowerCamelCase__ : List[str] = {vocab[el] for el in targets}
self.assertEqual({el["""token"""] for el in outputs} , UpperCamelCase__ )
lowerCamelCase__ : Any = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["""token_str"""] for el in outputs} , set(UpperCamelCase__ ) )
# Call argument
lowerCamelCase__ : Optional[int] = FillMaskPipeline(model=UpperCamelCase__ , tokenizer=UpperCamelCase__ )
lowerCamelCase__ : Dict = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=UpperCamelCase__ )
self.assertEqual(
UpperCamelCase__ , [
{"""sequence""": ANY(UpperCamelCase__ ), """score""": ANY(UpperCamelCase__ ), """token""": ANY(UpperCamelCase__ ), """token_str""": ANY(UpperCamelCase__ )},
{"""sequence""": ANY(UpperCamelCase__ ), """score""": ANY(UpperCamelCase__ ), """token""": ANY(UpperCamelCase__ ), """token_str""": ANY(UpperCamelCase__ )},
] , )
lowerCamelCase__ : Tuple = {vocab[el] for el in targets}
self.assertEqual({el["""token"""] for el in outputs} , UpperCamelCase__ )
lowerCamelCase__ : Optional[Any] = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["""token_str"""] for el in outputs} , set(UpperCamelCase__ ) )
# Score equivalence
lowerCamelCase__ : Optional[Any] = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=UpperCamelCase__ )
lowerCamelCase__ : List[str] = [top_mask["""token_str"""] for top_mask in outputs]
lowerCamelCase__ : List[str] = [top_mask["""score"""] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(UpperCamelCase__ ) == set(UpperCamelCase__ ):
lowerCamelCase__ : int = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=UpperCamelCase__ )
lowerCamelCase__ : List[Any] = [top_mask["""score"""] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(UpperCamelCase__ ) , nested_simplify(UpperCamelCase__ ) )
# Raises with invalid
with self.assertRaises(UpperCamelCase__ ):
lowerCamelCase__ : Any = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=[] )
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't be raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(UpperCamelCase__ ):
lowerCamelCase__ : Optional[Any] = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=[""""""] )
with self.assertRaises(UpperCamelCase__ ):
lowerCamelCase__ : Optional[int] = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets="""""" )
def lowerCamelCase_ ( self: Tuple , UpperCamelCase__: List[Any] , UpperCamelCase__: List[Any] ):
lowerCamelCase__ : int = FillMaskPipeline(model=UpperCamelCase__ , tokenizer=UpperCamelCase__ , top_k=2 )
lowerCamelCase__ : str = fill_masker(F'''This is a {tokenizer.mask_token}''' )
self.assertEqual(
UpperCamelCase__ , [
{"""sequence""": ANY(UpperCamelCase__ ), """score""": ANY(UpperCamelCase__ ), """token""": ANY(UpperCamelCase__ ), """token_str""": ANY(UpperCamelCase__ )},
{"""sequence""": ANY(UpperCamelCase__ ), """score""": ANY(UpperCamelCase__ ), """token""": ANY(UpperCamelCase__ ), """token_str""": ANY(UpperCamelCase__ )},
] , )
lowerCamelCase__ : Any = FillMaskPipeline(model=UpperCamelCase__ , tokenizer=UpperCamelCase__ )
lowerCamelCase__ : Tuple = fill_masker(F'''This is a {tokenizer.mask_token}''' , top_k=2 )
self.assertEqual(
UpperCamelCase__ , [
{"""sequence""": ANY(UpperCamelCase__ ), """score""": ANY(UpperCamelCase__ ), """token""": ANY(UpperCamelCase__ ), """token_str""": ANY(UpperCamelCase__ )},
{"""sequence""": ANY(UpperCamelCase__ ), """score""": ANY(UpperCamelCase__ ), """token""": ANY(UpperCamelCase__ ), """token_str""": ANY(UpperCamelCase__ )},
] , )
self.assertEqual(nested_simplify(UpperCamelCase__ ) , nested_simplify(UpperCamelCase__ ) )
def lowerCamelCase_ ( self: int , UpperCamelCase__: Optional[Any] , UpperCamelCase__: Tuple ):
lowerCamelCase__ : Dict = tokenizer.get_vocab()
lowerCamelCase__ : List[Any] = FillMaskPipeline(model=UpperCamelCase__ , tokenizer=UpperCamelCase__ )
# top_k=2, ntargets=3
lowerCamelCase__ : int = sorted(vocab.keys() )[:3]
lowerCamelCase__ : List[Any] = fill_masker(F'''This is a {tokenizer.mask_token}''' , top_k=2 , targets=UpperCamelCase__ )
# If we use the most probable targets, and filter differently, we should still
# have the same results
lowerCamelCase__ : List[Any] = [el["""token_str"""] for el in sorted(UpperCamelCase__ , key=lambda x : x["score"] , reverse=True )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(UpperCamelCase__ ).issubset(UpperCamelCase__ ):
lowerCamelCase__ : Tuple = fill_masker(F'''This is a {tokenizer.mask_token}''' , top_k=3 , targets=UpperCamelCase__ )
# They should yield exactly the same result
self.assertEqual(nested_simplify(UpperCamelCase__ ) , nested_simplify(UpperCamelCase__ ) )
def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase__: Tuple , UpperCamelCase__: List[str] ):
lowerCamelCase__ : Dict = FillMaskPipeline(model=UpperCamelCase__ , tokenizer=UpperCamelCase__ )
lowerCamelCase__ : Optional[int] = tokenizer.get_vocab()
# String duplicates + id duplicates
lowerCamelCase__ : int = sorted(vocab.keys() )[:3]
lowerCamelCase__ : List[Any] = [targets[0], targets[1], targets[0], targets[2], targets[1]]
lowerCamelCase__ : str = fill_masker(F'''My name is {tokenizer.mask_token}''' , targets=UpperCamelCase__ , top_k=10 )
# The target list contains duplicates, so the pipeline can't return more
# results than there are unique targets
self.assertEqual(len(UpperCamelCase__ ) , 3 )
def lowerCamelCase_ ( self: Tuple , UpperCamelCase__: str , UpperCamelCase__: Dict ):
lowerCamelCase__ : str = FillMaskPipeline(model=UpperCamelCase__ , tokenizer=UpperCamelCase__ )
lowerCamelCase__ : int = fill_masker(
F'''This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}''' , top_k=2 )
self.assertEqual(
UpperCamelCase__ , [
[
{"""sequence""": ANY(UpperCamelCase__ ), """score""": ANY(UpperCamelCase__ ), """token""": ANY(UpperCamelCase__ ), """token_str""": ANY(UpperCamelCase__ )},
{"""sequence""": ANY(UpperCamelCase__ ), """score""": ANY(UpperCamelCase__ ), """token""": ANY(UpperCamelCase__ ), """token_str""": ANY(UpperCamelCase__ )},
],
[
{"""sequence""": ANY(UpperCamelCase__ ), """score""": ANY(UpperCamelCase__ ), """token""": ANY(UpperCamelCase__ ), """token_str""": ANY(UpperCamelCase__ )},
{"""sequence""": ANY(UpperCamelCase__ ), """score""": ANY(UpperCamelCase__ ), """token""": ANY(UpperCamelCase__ ), """token_str""": ANY(UpperCamelCase__ )},
],
[
{"""sequence""": ANY(UpperCamelCase__ ), """score""": ANY(UpperCamelCase__ ), """token""": ANY(UpperCamelCase__ ), """token_str""": ANY(UpperCamelCase__ )},
{"""sequence""": ANY(UpperCamelCase__ ), """score""": ANY(UpperCamelCase__ ), """token""": ANY(UpperCamelCase__ ), """token_str""": ANY(UpperCamelCase__ )},
],
] , )
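# A minimal, self-contained sketch of the fill-mask pipeline exercised above; the tiny
# checkpoint is the one used in these tests, and the printed keys are the documented
# output fields:
from transformers import pipeline

unmasker = pipeline("fill-mask", model="sshleifer/tiny-distilroberta-base")
for pred in unmasker("My name is <mask>", top_k=2):
    print(pred["token_str"], pred["score"])  # each entry also carries "sequence" and "token"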
| 631 |
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTVaConfigTester(ConfigTester):
def lowerCamelCase_ ( self: Any ):
lowerCamelCase__ : Dict = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(UpperCamelCase__ , """width_multiplier""" ) )
class MobileViTVaModelTester:
def __init__( self: str , UpperCamelCase__: Optional[int] , UpperCamelCase__: str=13 , UpperCamelCase__: Any=64 , UpperCamelCase__: Optional[Any]=2 , UpperCamelCase__: str=3 , UpperCamelCase__: List[str]="swish" , UpperCamelCase__: Any=3 , UpperCamelCase__: Optional[int]=32 , UpperCamelCase__: Union[str, Any]=0.1 , UpperCamelCase__: int=0.02 , UpperCamelCase__: Dict=True , UpperCamelCase__: Dict=True , UpperCamelCase__: Any=10 , UpperCamelCase__: int=None , UpperCamelCase__: List[Any]=0.25 , UpperCamelCase__: str=0.0 , UpperCamelCase__: Optional[int]=0.0 , ):
lowerCamelCase__ : Any = parent
lowerCamelCase__ : Optional[Any] = batch_size
lowerCamelCase__ : Optional[int] = image_size
lowerCamelCase__ : str = patch_size
lowerCamelCase__ : Optional[int] = num_channels
lowerCamelCase__ : Optional[Any] = make_divisible(512 * width_multiplier , divisor=8 )
lowerCamelCase__ : List[str] = hidden_act
lowerCamelCase__ : Any = conv_kernel_size
lowerCamelCase__ : Any = output_stride
lowerCamelCase__ : Union[str, Any] = classifier_dropout_prob
lowerCamelCase__ : List[str] = use_labels
lowerCamelCase__ : Optional[Any] = is_training
lowerCamelCase__ : List[str] = num_labels
lowerCamelCase__ : Dict = initializer_range
lowerCamelCase__ : List[Any] = scope
lowerCamelCase__ : Tuple = width_multiplier
lowerCamelCase__ : List[Any] = ffn_dropout
lowerCamelCase__ : Any = attn_dropout
def lowerCamelCase_ ( self: Dict ):
lowerCamelCase__ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase__ : Tuple = None
lowerCamelCase__ : Optional[Any] = None
if self.use_labels:
lowerCamelCase__ : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
lowerCamelCase__ : str = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
lowerCamelCase__ : Union[str, Any] = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowerCamelCase_ ( self: List[Any] ):
return MobileViTVaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , )
def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase__: Optional[Any] , UpperCamelCase__: int , UpperCamelCase__: Tuple , UpperCamelCase__: Optional[int] ):
lowerCamelCase__ : Union[str, Any] = MobileViTVaModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowerCamelCase__ : str = model(UpperCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Tuple ):
lowerCamelCase__ : Tuple = self.num_labels
lowerCamelCase__ : Dict = MobileViTVaForImageClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowerCamelCase__ : int = model(UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase_ ( self: List[Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Any , UpperCamelCase__: Optional[Any] , UpperCamelCase__: str ):
lowerCamelCase__ : List[str] = self.num_labels
lowerCamelCase__ : Union[str, Any] = MobileViTVaForSemanticSegmentation(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowerCamelCase__ : Tuple = model(UpperCamelCase__ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
lowerCamelCase__ : List[Any] = model(UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowerCamelCase_ ( self: Tuple ):
lowerCamelCase__ : Any = self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = config_and_inputs
lowerCamelCase__ : Optional[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
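# `make_divisible` (imported above) rounds a scaled channel count to a hardware-friendly
# multiple; the library's own helper may differ in details. A hedged sketch of the usual
# MobileNet-style rule:
def make_divisible_sketch(value, divisor=8, min_value=None):
    if min_value is None:
        min_value = divisor
    new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
    if new_value < 0.9 * value:  # never round down by more than 10%
        new_value += divisor
    return new_value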
@require_torch
class MobileViTVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            """feature-extraction""": MobileViTVaModel,
            """image-classification""": MobileViTVaForImageClassification,
            """image-segmentation""": MobileViTVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
def lowerCamelCase_ ( self: Optional[int] ):
lowerCamelCase__ : Tuple = MobileViTVaModelTester(self )
lowerCamelCase__ : List[str] = MobileViTVaConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ )
def lowerCamelCase_ ( self: Tuple ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileViTV2 does not use inputs_embeds""" )
def lowerCamelCase_ ( self: int ):
pass
@unittest.skip(reason="""MobileViTV2 does not support input and output embeddings""" )
def lowerCamelCase_ ( self: List[str] ):
pass
@unittest.skip(reason="""MobileViTV2 does not output attentions""" )
def lowerCamelCase_ ( self: Union[str, Any] ):
pass
@require_torch_multi_gpu
@unittest.skip(reason="""Got `CUDA error: misaligned address` for tests after this one being run.""" )
def lowerCamelCase_ ( self: int ):
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCamelCase_ ( self: Tuple ):
pass
def lowerCamelCase_ ( self: Tuple ):
lowerCamelCase__ , lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Union[str, Any] = model_class(UpperCamelCase__ )
lowerCamelCase__ : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__ : Tuple = [*signature.parameters.keys()]
lowerCamelCase__ : str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def lowerCamelCase_ ( self: str ):
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def lowerCamelCase_ ( self: List[str] ):
def check_hidden_states_output(UpperCamelCase__: Union[str, Any] , UpperCamelCase__: List[str] , UpperCamelCase__: Optional[Any] ):
lowerCamelCase__ : List[Any] = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
lowerCamelCase__ : Tuple = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
lowerCamelCase__ : Optional[int] = outputs.hidden_states
lowerCamelCase__ : List[Any] = 5
self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
lowerCamelCase__ : int = 2
for i in range(len(UpperCamelCase__ ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
lowerCamelCase__ , lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : int = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase__ : str = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase_ ( self: Any ):
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )
def lowerCamelCase_ ( self: Dict ):
lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*UpperCamelCase__ )
@slow
def lowerCamelCase_ ( self: Union[str, Any] ):
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ : Union[str, Any] = MobileViTVaModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def prepare_img():
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class MobileViTVaModelIntegrationTest(unittest.TestCase):
@cached_property
def lowerCamelCase_ ( self: Tuple ):
return (
MobileViTImageProcessor.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" )
if is_vision_available()
else None
)
@slow
def lowerCamelCase_ ( self: Any ):
lowerCamelCase__ : Optional[Any] = MobileViTVaForImageClassification.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" ).to(
UpperCamelCase__ )
lowerCamelCase__ : Union[str, Any] = self.default_image_processor
lowerCamelCase__ : List[Any] = prepare_img()
lowerCamelCase__ : Any = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
lowerCamelCase__ : int = model(**UpperCamelCase__ )
# verify the logits
lowerCamelCase__ : str = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
lowerCamelCase__ : Optional[int] = torch.tensor([-1.6_336e00, -7.3_204e-02, -5.1_883e-01] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) )
@slow
def lowerCamelCase_ ( self: Union[str, Any] ):
lowerCamelCase__ : int = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
lowerCamelCase__ : Optional[Any] = model.to(UpperCamelCase__ )
lowerCamelCase__ : Any = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
lowerCamelCase__ : Union[str, Any] = prepare_img()
lowerCamelCase__ : Dict = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
lowerCamelCase__ : Optional[Any] = model(**UpperCamelCase__ )
lowerCamelCase__ : str = outputs.logits
# verify the logits
lowerCamelCase__ : List[str] = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , UpperCamelCase__ )
lowerCamelCase__ : Any = torch.tensor(
[
[[7.0_863, 7.1_525, 6.8_201], [6.6_931, 6.8_770, 6.8_933], [6.2_978, 7.0_366, 6.9_636]],
[[-3.7_134, -3.6_712, -3.6_675], [-3.5_825, -3.3_549, -3.4_777], [-3.3_435, -3.3_979, -3.2_857]],
[[-2.9_329, -2.8_003, -2.7_369], [-3.0_564, -2.4_780, -2.0_207], [-2.6_889, -1.9_298, -1.7_640]],
] , device=UpperCamelCase__ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , UpperCamelCase__ , atol=1e-4 ) )
@slow
def lowerCamelCase_ ( self: Optional[int] ):
lowerCamelCase__ : Optional[Any] = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
lowerCamelCase__ : List[Any] = model.to(UpperCamelCase__ )
lowerCamelCase__ : Union[str, Any] = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
lowerCamelCase__ : Optional[Any] = prepare_img()
lowerCamelCase__ : Dict = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
lowerCamelCase__ : Dict = model(**UpperCamelCase__ )
lowerCamelCase__ : List[str] = outputs.logits.detach().cpu()
lowerCamelCase__ : List[Any] = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase__ , target_sizes=[(50, 60)] )
lowerCamelCase__ : int = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , UpperCamelCase__ )
lowerCamelCase__ : Optional[Any] = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase__ )
lowerCamelCase__ : int = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , UpperCamelCase__ )
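# Hedged sketch of what `post_process_semantic_segmentation` boils down to: upscale the
# (batch, num_labels, H, W) logits to the requested size, then argmax over the class axis.
# The real image-processor method additionally handles batching and edge cases:
import torch

def naive_semantic_postprocess(logits, target_size):
    resized = torch.nn.functional.interpolate(logits, size=target_size, mode="bilinear", align_corners=False)
    return resized.argmax(dim=1)  # per-pixel class ids with shape (batch, *target_size)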
| 631 | 1 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''allenai/longformer-base-4096''': 40_96,
'''allenai/longformer-large-4096''': 40_96,
'''allenai/longformer-large-4096-finetuned-triviaqa''': 40_96,
'''allenai/longformer-base-4096-extra.pos.embd.only''': 40_96,
'''allenai/longformer-large-4096-extra.pos.embd.only''': 40_96,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """simple docstring"""
    bs = (
        list(range(ord('''!''' ), ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ), ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ), ord('''ÿ''' ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
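# Quick sanity check of the byte-to-unicode table built above: printable bytes map to
# themselves, while a space (0x20) gets the printable stand-in chr(256 + 32) == "Ġ",
# which is why byte-level BPE vocabularies are full of "Ġ"-prefixed tokens.
table = bytes_to_unicode()
assert len(table) == 256 and table[ord("A")] == "A" and table[ord(" ")] == "Ġ"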
def get_pairs(word):
    """simple docstring"""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
class LongformerTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__="replace" , SCREAMING_SNAKE_CASE__="<s>" , SCREAMING_SNAKE_CASE__="</s>" , SCREAMING_SNAKE_CASE__="</s>" , SCREAMING_SNAKE_CASE__="<s>" , SCREAMING_SNAKE_CASE__="<unk>" , SCREAMING_SNAKE_CASE__="<pad>" , SCREAMING_SNAKE_CASE__="<mask>" , SCREAMING_SNAKE_CASE__=False , **SCREAMING_SNAKE_CASE__ , ):
lowercase : List[str] = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else bos_token
lowercase : List[str] = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else eos_token
lowercase : Any = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else sep_token
lowercase : Optional[Any] = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else cls_token
lowercase : Any = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else unk_token
lowercase : List[str] = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else pad_token
# Mask token behaves like a normal word, i.e. includes the space before it
lowercase : List[Any] = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else mask_token
super().__init__(
errors=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
with open(SCREAMING_SNAKE_CASE__ , encoding='''utf-8''' ) as vocab_handle:
lowercase : Optional[Any] = json.load(SCREAMING_SNAKE_CASE__ )
lowercase : Any = {v: k for k, v in self.encoder.items()}
lowercase : List[Any] = errors # how to handle errors in decoding
lowercase : Optional[Any] = bytes_to_unicode()
lowercase : int = {v: k for k, v in self.byte_encoder.items()}
with open(SCREAMING_SNAKE_CASE__ , encoding='''utf-8''' ) as merges_handle:
lowercase : Optional[int] = merges_handle.read().split('''\n''' )[1:-1]
lowercase : Dict = [tuple(merge.split() ) for merge in bpe_merges]
lowercase : Optional[Any] = dict(zip(SCREAMING_SNAKE_CASE__ , range(len(SCREAMING_SNAKE_CASE__ ) ) ) )
lowercase : int = {}
lowercase : Tuple = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowercase : Any = re.compile(r'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
def __lowerCamelCase ( self ):
return len(self.encoder )
def __lowerCamelCase ( self ):
return dict(self.encoder , **self.added_tokens_encoder )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
if token in self.cache:
return self.cache[token]
lowercase : Optional[Any] = tuple(SCREAMING_SNAKE_CASE__ )
lowercase : Dict = get_pairs(SCREAMING_SNAKE_CASE__ )
if not pairs:
return token
while True:
lowercase : str = min(SCREAMING_SNAKE_CASE__ , key=lambda SCREAMING_SNAKE_CASE__ : self.bpe_ranks.get(SCREAMING_SNAKE_CASE__ , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
lowercase , lowercase : int = bigram
lowercase : Tuple = []
lowercase : Any = 0
while i < len(SCREAMING_SNAKE_CASE__ ):
try:
lowercase : Any = word.index(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowercase : Tuple = j
if word[i] == first and i < len(SCREAMING_SNAKE_CASE__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowercase : Tuple = tuple(SCREAMING_SNAKE_CASE__ )
lowercase : Dict = new_word
if len(SCREAMING_SNAKE_CASE__ ) == 1:
break
else:
lowercase : Tuple = get_pairs(SCREAMING_SNAKE_CASE__ )
lowercase : Union[str, Any] = ''' '''.join(SCREAMING_SNAKE_CASE__ )
lowercase : Optional[int] = word
return word
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
lowercase : Dict = []
for token in re.findall(self.pat , SCREAMING_SNAKE_CASE__ ):
lowercase : Tuple = ''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(SCREAMING_SNAKE_CASE__ ).split(''' ''' ) )
return bpe_tokens
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
return self.encoder.get(SCREAMING_SNAKE_CASE__ , self.encoder.get(self.unk_token ) )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
return self.decoder.get(SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
lowercase : Union[str, Any] = ''''''.join(SCREAMING_SNAKE_CASE__ )
lowercase : Any = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
return text
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowercase : str = os.path.join(
SCREAMING_SNAKE_CASE__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
lowercase : List[str] = os.path.join(
SCREAMING_SNAKE_CASE__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(SCREAMING_SNAKE_CASE__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=SCREAMING_SNAKE_CASE__ , ensure_ascii=SCREAMING_SNAKE_CASE__ ) + '''\n''' )
lowercase : Tuple = 0
with open(SCREAMING_SNAKE_CASE__ , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
''' Please check that the tokenizer is not corrupted!''' )
lowercase : Dict = token_index
writer.write(''' '''.join(SCREAMING_SNAKE_CASE__ ) + '''\n''' )
index += 1
return vocab_file, merge_file
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase : Tuple = [self.cls_token_id]
lowercase : Dict = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE__ , token_ids_a=SCREAMING_SNAKE_CASE__ , already_has_special_tokens=SCREAMING_SNAKE_CASE__ )
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1]
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1]
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
lowercase : Optional[Any] = [self.sep_token_id]
lowercase : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False , **SCREAMING_SNAKE_CASE__ ):
lowercase : List[str] = kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(SCREAMING_SNAKE_CASE__ ) > 0 and not text[0].isspace()):
lowercase : List[str] = ''' ''' + text
return (text, kwargs)
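# A hedged usage sketch for the byte-level BPE tokenizer defined above; the checkpoint
# name comes from the pretrained map at the top of this snippet:
from transformers import LongformerTokenizer

tok = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096")
ids = tok("Hello world").input_ids  # wrapped as <s> ... </s> by build_inputs_with_special_tokens
assert tok.decode(ids, skip_special_tokens=True) == "Hello world"  # byte-level BPE round-trips losslessly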
| 319 |
import math
def real_power(apparent_power: float, power_factor: float) -> float:
    """simple docstring"""
    if (
        not isinstance(power_factor, (int, float) )
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError('''power_factor must be a valid float value between -1 and 1.''' )
    return apparent_power * power_factor


def reactive_power(apparent_power: float, power_factor: float) -> float:
    """simple docstring"""
    if (
        not isinstance(power_factor, (int, float) )
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError('''power_factor must be a valid float value between -1 and 1.''' )
    return apparent_power * math.sqrt(1 - power_factor**2 )
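# Worked example of the power triangle S^2 = P^2 + Q^2 for a 100 VA load at power factor 0.8:
assert real_power(100, 0.8) == 80.0                # P = S * pf
assert round(reactive_power(100, 0.8), 9) == 60.0  # Q = S * sqrt(1 - pf^2); note 100^2 == 80^2 + 60^2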
if __name__ == "__main__":
import doctest
doctest.testmod()
| 319 | 1 |
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
0: """Sunday""",
1: """Monday""",
2: """Tuesday""",
3: """Wednesday""",
4: """Thursday""",
5: """Friday""",
6: """Saturday""",
}
def get_week_day(year: int, month: int, day: int) -> str:
    assert len(str(year) ) > 2, "year should be in YYYY format"
    assert 1 <= month <= 1_2, "month should be between 1 and 12"
    assert 1 <= day <= 3_1, "day should be between 1 and 31"
    # Doomsday algorithm:
    century = year // 1_0_0
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 1_0_0
    centurian_m = centurian % 1_2
    dooms_day = (
        (centurian // 1_2) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 4_0_0) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
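# Sanity check against a known date: 2020 is a leap year whose doomsday (4/4, 10/10, ...)
# fell on a Saturday, so 24 October 2020, two weeks after 10/10, was also a Saturday.
assert get_week_day(2020, 10, 24) == "Saturday"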
if __name__ == "__main__":
import doctest
doctest.testmod()
| 218 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True )
class AudioClassification(TaskTemplate ):
    """simple docstring"""

    task: str = field(default="audio-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
    input_schema: ClassVar[Features] = Features({"audio": Audio()} )
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel} )
    audio_column: str = "audio"
    label_column: str = "labels"

    def align_with_features(self , features ):
        """simple docstring"""
        if self.label_column not in features:
            raise ValueError(f'''Column {self.label_column} is not present in features.''' )
        if not isinstance(features[self.label_column] , ClassLabel ):
            raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' )
        task_template = copy.deepcopy(self )
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self ) -> Dict[str, str]:
        """simple docstring"""
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
        }
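# Hedged sketch of how this task template is typically used; the column names are
# illustrative assumptions, and the task-template API is deprecated in recent
# `datasets` releases:
# features = Features({"audio": Audio(), "genre": ClassLabel(names=["rock", "jazz"])})
# task = AudioClassification(audio_column="audio", label_column="genre")
# task = task.align_with_features(features)  # pins `labels` in the copy to the dataset's ClassLabel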
| 218 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester:
"""simple docstring"""
def __init__( self : List[str] , __A : Optional[Any] , __A : Tuple=2 , __A : Dict=3_2 , __A : Union[str, Any]=1_6 , __A : Dict=3 , __A : Tuple=True , __A : Tuple=True , __A : Optional[int]=3_2 , __A : str=4 , __A : Any=[0, 1, 2, 3] , __A : int=4 , __A : int=3_7 , __A : int="gelu" , __A : int=0.1 , __A : Tuple=0.1 , __A : int=0.0_2 , __A : Optional[int]=3 , __A : Tuple=[1, 3_8_4, 2_4, 2_4] , __A : List[Any]=True , __A : int=None , ):
"""simple docstring"""
_lowercase = parent
_lowercase = batch_size
_lowercase = image_size
_lowercase = patch_size
_lowercase = num_channels
_lowercase = is_training
_lowercase = use_labels
_lowercase = hidden_size
_lowercase = num_hidden_layers
_lowercase = backbone_out_indices
_lowercase = num_attention_heads
_lowercase = intermediate_size
_lowercase = hidden_act
_lowercase = hidden_dropout_prob
_lowercase = attention_probs_dropout_prob
_lowercase = initializer_range
_lowercase = num_labels
_lowercase = backbone_featmap_shape
_lowercase = scope
_lowercase = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
_lowercase = (image_size // patch_size) ** 2
_lowercase = num_patches + 1
def snake_case ( self : Dict ):
"""simple docstring"""
_lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowercase = None
if self.use_labels:
_lowercase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
_lowercase = self.get_config()
return config, pixel_values, labels
def snake_case ( self : Optional[Any] ):
"""simple docstring"""
_lowercase = {
"global_padding": "same",
"layer_type": "bottleneck",
"depths": [3, 4, 9],
"out_features": ["stage1", "stage2", "stage3"],
"embedding_dynamic_padding": True,
"hidden_sizes": [9_6, 1_9_2, 3_8_4, 7_6_8],
"num_groups": 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__A , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=__A , backbone_featmap_shape=self.backbone_featmap_shape , )
def snake_case ( self : str , __A : List[Any] , __A : Optional[Any] , __A : int ):
"""simple docstring"""
_lowercase = DPTModel(config=__A )
model.to(__A )
model.eval()
_lowercase = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self : List[Any] , __A : List[str] , __A : List[Any] , __A : int ):
"""simple docstring"""
_lowercase = self.num_labels
_lowercase = DPTForDepthEstimation(__A )
model.to(__A )
model.eval()
_lowercase = model(__A )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def snake_case ( self : Optional[int] , __A : int , __A : Optional[int] , __A : Union[str, Any] ):
"""simple docstring"""
_lowercase = self.num_labels
_lowercase = DPTForSemanticSegmentation(__A )
model.to(__A )
model.eval()
_lowercase = model(__A , labels=__A )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def snake_case ( self : Dict ):
"""simple docstring"""
_lowercase = self.prepare_config_and_inputs()
_lowercase , _lowercase , _lowercase = config_and_inputs
_lowercase = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            'depth-estimation': DPTForDepthEstimation,
            'feature-extraction': DPTModel,
            'image-segmentation': DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
def snake_case ( self : Union[str, Any] ):
"""simple docstring"""
_lowercase = DPTModelTester(self )
_lowercase = ConfigTester(self , config_class=__A , has_text_modality=__A , hidden_size=3_7 )
def snake_case ( self : Union[str, Any] ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="DPT does not use inputs_embeds" )
def snake_case ( self : List[Any] ):
"""simple docstring"""
pass
def snake_case ( self : str ):
"""simple docstring"""
_lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase = model_class(__A )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_lowercase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__A , nn.Linear ) )
def snake_case ( self : Dict ):
"""simple docstring"""
_lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase = model_class(__A )
_lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowercase = [*signature.parameters.keys()]
_lowercase = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __A )
def snake_case ( self : str ):
"""simple docstring"""
_lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def snake_case ( self : List[str] ):
"""simple docstring"""
_lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*__A )
def snake_case ( self : Any ):
"""simple docstring"""
_lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__A )
def snake_case ( self : List[Any] ):
"""simple docstring"""
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
_lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
_lowercase = True
if model_class in get_values(__A ):
continue
_lowercase = model_class(__A )
model.to(__A )
model.train()
_lowercase = self._prepare_for_class(__A , __A , return_labels=__A )
_lowercase = model(**__A ).loss
loss.backward()
def snake_case ( self : Union[str, Any] ):
"""simple docstring"""
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
_lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
_lowercase = False
_lowercase = True
if model_class in get_values(__A ) or not model_class.supports_gradient_checkpointing:
continue
_lowercase = model_class(__A )
model.to(__A )
model.gradient_checkpointing_enable()
model.train()
_lowercase = self._prepare_for_class(__A , __A , return_labels=__A )
_lowercase = model(**__A ).loss
loss.backward()
def snake_case ( self : Union[str, Any] ):
"""simple docstring"""
_lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
_lowercase = _config_zero_init(__A )
for model_class in self.all_model_classes:
_lowercase = model_class(config=__A )
# Skip the check for the backbone
_lowercase = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
_lowercase = [f"""{name}.{key}""" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def snake_case ( self : Union[str, Any] ):
"""simple docstring"""
pass
@slow
def snake_case ( self : List[Any] ):
"""simple docstring"""
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
_lowercase = DPTModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def snake_case ( self : int ):
"""simple docstring"""
# We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
_lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
_lowercase = "add"
with self.assertRaises(__A ):
_lowercase = DPTForDepthEstimation(__A )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
@require_vision
@slow
class DPTModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
def snake_case ( self : int ):
"""simple docstring"""
_lowercase = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas" )
_lowercase = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas" ).to(__A )
_lowercase = prepare_img()
_lowercase = image_processor(images=__A , return_tensors="pt" ).to(__A )
# forward pass
with torch.no_grad():
_lowercase = model(**__A )
_lowercase = outputs.predicted_depth
# verify the predicted depth
_lowercase = torch.Size((1, 3_8_4, 3_8_4) )
self.assertEqual(predicted_depth.shape , __A )
_lowercase = torch.tensor(
[[[5.6_4_3_7, 5.6_1_4_6, 5.6_5_1_1], [5.4_3_7_1, 5.5_6_4_9, 5.5_9_5_8], [5.5_2_1_5, 5.5_1_8_4, 5.5_2_9_3]]] ).to(__A )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 1_0_0 , __A , atol=1e-4 ) )
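# Hedged sketch of turning the (batch, 384, 384) predicted depth into a map matching the
# original image, mirroring typical downstream code (the interpolation mode is an assumption):
import torch

def resize_depth(predicted_depth, width, height):
    resized = torch.nn.functional.interpolate(
        predicted_depth.unsqueeze(1), size=(height, width), mode="bicubic", align_corners=False
    )  # interpolate wants a channel axis, hence the unsqueeze/squeeze pair
    return resized.squeeze(1)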
| 497 |
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
__magic_name__ : int = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ImageClassificationPipeline(Pipeline ):
"""simple docstring"""
def __init__( self : Tuple , *__A : Any , **__A : List[Any] ):
"""simple docstring"""
super().__init__(*__A , **__A )
requires_backends(self , "vision" )
self.check_model_type(
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
if self.framework == "tf"
else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING )
def snake_case ( self : Tuple , __A : str=None ):
"""simple docstring"""
_lowercase = {}
if top_k is not None:
_lowercase = top_k
return {}, {}, postprocess_params
def __call__( self : Union[str, Any] , __A : Union[str, List[str], "Image.Image", List["Image.Image"]] , **__A : Dict ):
"""simple docstring"""
return super().__call__(__A , **__A )
def snake_case ( self : Optional[int] , __A : List[Any] ):
"""simple docstring"""
_lowercase = load_image(__A )
_lowercase = self.image_processor(images=__A , return_tensors=self.framework )
return model_inputs
def snake_case ( self : Dict , __A : Optional[int] ):
"""simple docstring"""
_lowercase = self.model(**__A )
return model_outputs
def snake_case ( self : List[Any] , __A : str , __A : Tuple=5 ):
"""simple docstring"""
if top_k > self.model.config.num_labels:
_lowercase = self.model.config.num_labels
if self.framework == "pt":
_lowercase = model_outputs.logits.softmax(-1 )[0]
_lowercase , _lowercase = probs.topk(__A )
elif self.framework == "tf":
_lowercase = stable_softmax(model_outputs.logits , axis=-1 )[0]
_lowercase = tf.math.top_k(__A , k=__A )
_lowercase , _lowercase = topk.values.numpy(), topk.indices.numpy()
else:
raise ValueError(f"""Unsupported framework: {self.framework}""" )
_lowercase = scores.tolist()
_lowercase = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(__A , __A )]
| 497 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
_UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
if is_vision_available():
import PIL
class UpperCAmelCase_ ( _a):
lowerCamelCase__ : Optional[Any] = ["pixel_values"]
def __init__( self , a = True , a = None , a = PILImageResampling.BICUBIC , a = True , a = None , a = True , a = 1 / 2_5_5 , a = True , a = None , a = None , a = True , **a , ) -> None:
super().__init__(**a )
lowercase__ : Union[str, Any] = size if size is not None else {'shortest_edge': 2_2_4}
lowercase__ : Tuple = get_size_dict(a , default_to_square=a )
lowercase__ : List[str] = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4}
lowercase__ : Optional[int] = get_size_dict(a , default_to_square=a , param_name='crop_size' )
lowercase__ : Union[str, Any] = do_resize
lowercase__ : List[str] = size
lowercase__ : Optional[int] = resample
lowercase__ : Tuple = do_center_crop
lowercase__ : Tuple = crop_size
lowercase__ : Dict = do_rescale
lowercase__ : Any = rescale_factor
lowercase__ : Dict = do_normalize
lowercase__ : List[str] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
lowercase__ : Any = image_std if image_std is not None else OPENAI_CLIP_STD
lowercase__ : List[Any] = do_convert_rgb
def _UpperCAmelCase ( self , a , a , a = PILImageResampling.BICUBIC , a = None , **a , ) -> np.ndarray:
lowercase__ : Optional[Any] = get_size_dict(a , default_to_square=a )
if "shortest_edge" not in size:
raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
lowercase__ : List[Any] = get_resize_output_image_size(a , size=size['shortest_edge'] , default_to_square=a )
return resize(a , size=a , resample=a , data_format=a , **a )
def _UpperCAmelCase ( self , a , a , a = None , **a , ) -> np.ndarray:
lowercase__ : Tuple = get_size_dict(a )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
return center_crop(a , size=(size['height'], size['width']) , data_format=a , **a )
def _UpperCAmelCase ( self , a , a , a = None , **a , ) -> int:
return rescale(a , scale=a , data_format=a , **a )
def _UpperCAmelCase ( self , a , a , a , a = None , **a , ) -> np.ndarray:
return normalize(a , mean=a , std=a , data_format=a , **a )
def _UpperCAmelCase ( self , a , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = ChannelDimension.FIRST , **a , ) -> PIL.Image.Image:
lowercase__ : int = do_resize if do_resize is not None else self.do_resize
lowercase__ : Union[str, Any] = size if size is not None else self.size
lowercase__ : Any = get_size_dict(a , param_name='size' , default_to_square=a )
lowercase__ : str = resample if resample is not None else self.resample
lowercase__ : Any = do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase__ : Optional[Any] = crop_size if crop_size is not None else self.crop_size
lowercase__ : str = get_size_dict(a , param_name='crop_size' , default_to_square=a )
lowercase__ : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
lowercase__ : int = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase__ : Any = do_normalize if do_normalize is not None else self.do_normalize
lowercase__ : List[Any] = image_mean if image_mean is not None else self.image_mean
lowercase__ : Union[str, Any] = image_std if image_std is not None else self.image_std
lowercase__ : str = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
lowercase__ : int = make_list_of_images(a )
if not valid_images(a ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
lowercase__ : int = [convert_to_rgb(a ) for image in images]
# All transformations expect numpy arrays.
lowercase__ : Optional[Any] = [to_numpy_array(a ) for image in images]
if do_resize:
lowercase__ : Any = [self.resize(image=a , size=a , resample=a ) for image in images]
if do_center_crop:
lowercase__ : Optional[Any] = [self.center_crop(image=a , size=a ) for image in images]
if do_rescale:
lowercase__ : Dict = [self.rescale(image=a , scale=a ) for image in images]
if do_normalize:
lowercase__ : List[Any] = [self.normalize(image=a , mean=a , std=a ) for image in images]
lowercase__ : str = [to_channel_dimension_format(a , a ) for image in images]
lowercase__ : Tuple = {'pixel_values': images}
return BatchFeature(data=a , tensor_type=a )
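# A hedged aside: the transform chain the image processor above applies,
# written out in raw numpy. The mean/std values are the OpenAI CLIP
# constants the class defaults to; the random input is only illustrative.
import numpy as np

image = np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8)  # fake RGB crop
image = image.astype(np.float32) * (1 / 255)  # rescale
mean = np.array([0.48145466, 0.4578275, 0.40821073])  # OPENAI_CLIP_MEAN
std = np.array([0.26862954, 0.26130258, 0.27577711])  # OPENAI_CLIP_STD
image = (image - mean) / std  # normalize
image = image.transpose(2, 0, 1)  # HWC -> CHW, i.e. ChannelDimension.FIRST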
| 716 |
"""simple docstring"""
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def bamb(x ):
    '''simple docstring'''
    return int(x / 2**20 )


class TorchTracemalloc:
    def __enter__( self ):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__( self , *exc ):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = bamb(self.end - self.begin )
        self.peaked = bamb(self.peak - self.begin )
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")


def get_dataloaders(accelerator: Accelerator , batch_size: int = 16 , model_name: str = "bert-base-cased" , n_train: int = 320 , n_val: int = 160 , ):
    '''simple docstring'''
    tokenizer = AutoTokenizer.from_pretrained(model_name )
    datasets = load_dataset(
        'glue' , 'mrpc' , split={'train': f"""train[:{n_train}]""", 'validation': f"""validation[:{n_val}]"""} )

    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=True , max_length=None )
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=False )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label' , 'labels' )

    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples , padding='max_length' , max_length=128 , return_tensors='pt' )
        return tokenizer.pad(examples , padding='longest' , return_tensors='pt' )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size )
    return train_dataloader, eval_dataloader
def training_function(config , args ):
    '''simple docstring'''
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'] )
    seed = int(config['seed'] )
    batch_size = int(config['batch_size'] )
    model_name = args.model_name_or_path

    set_seed(seed )
    train_dataloader, eval_dataloader = get_dataloaders(accelerator , batch_size , model_name , args.n_train , args.n_val )

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name , return_dict=True )

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters() , lr=lr )

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            'gradient_accumulation_steps'
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader ) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer , num_warmup_steps=0 , num_training_steps=max_training_steps , )
    else:
        lr_scheduler = DummyScheduler(optimizer , total_num_steps=max_training_steps , warmup_num_steps=0 )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch , num_epochs ):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader ):
                outputs = model(**batch )
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss )
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                overall_step += 1

        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print('Memory before entering the train : {}'.format(bamb(tracemalloc.begin ) ) )
        accelerator.print('Memory consumed at the end of the train (end-begin): {}'.format(tracemalloc.used ) )
        accelerator.print('Peak Memory consumed during the train (max-begin): {}'.format(tracemalloc.peaked ) )
        accelerator.print(
            'Total Peak Memory consumed during the train (max): {}'.format(
                tracemalloc.peaked + bamb(tracemalloc.begin ) ) )
        train_total_peak_memory[f"""epoch-{epoch}"""] = tracemalloc.peaked + bamb(tracemalloc.begin )
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"""epoch-{epoch}"""] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir , 'peak_memory_utilization.json' ) , 'w' ) as f:
            json.dump(train_total_peak_memory , f )
def main():
    '''simple docstring'''
    parser = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
    parser.add_argument(
        '--model_name_or_path' , type=str , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=False , )
    parser.add_argument(
        '--output_dir' , type=str , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
    parser.add_argument(
        '--peak_memory_upper_bound' , type=float , default=None , help='The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.' , )
    parser.add_argument(
        '--n_train' , type=int , default=320 , help='Number of training examples to use.' , )
    parser.add_argument(
        '--n_val' , type=int , default=160 , help='Number of validation examples to use.' , )
    parser.add_argument(
        '--num_epochs' , type=int , default=1 , help='Number of train epochs.' , )
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
    training_function(config , args )
if __name__ == "__main__":
main()
| 645 | 0 |
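# A hedged aside: the same __enter__/__exit__ memory-tracking pattern as
# TorchTracemalloc above, as a generic reusable sketch (requires CUDA).
import gc
import torch

class CudaMemTracker:
    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.used_mb = (torch.cuda.memory_allocated() - self.begin) >> 20
        self.peaked_mb = (torch.cuda.max_memory_allocated() - self.begin) >> 20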
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict ) -> tuple:
    """simple docstring"""
    return (data["data"], data["target"])


def xgboost(features , target ):
    """simple docstring"""
    classifier = XGBClassifier()
    classifier.fit(features , target )
    return classifier


def main() -> None:
    """simple docstring"""
    iris = load_iris()
    features, targets = data_handling(iris )
    x_train, x_test, y_train, y_test = train_test_split(
        features , targets , test_size=0.25 )
    names = iris["target_names"]

    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train , y_train )

    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier , x_test , y_test , display_labels=names , cmap="Blues" , normalize="true" , )
    plt.title("Normalized Confusion Matrix - IRIS Dataset" )
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 339 |
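# A hedged follow-on to the snippet above: scoring the fitted classifier on
# the held-out split. Assumes the locals from main() (xgboost_classifier,
# x_test, y_test) are in scope.
from sklearn.metrics import accuracy_score

predictions = xgboost_classifier.predict(x_test)
print(f"accuracy: {accuracy_score(y_test, predictions):.3f}")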
'''simple docstring'''
def solution(numerator: int = 3 , denominator: int = 7 , limit: int = 1_00_00_00 ) -> int:
    """simple docstring"""
    max_numerator = 0
    max_denominator = 1
    for current_denominator in range(1 , limit + 1 ):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1_000_000))
| 331 | 0 |
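# Worked example (hedged check of the algorithm above): with limit=8 the
# largest fraction below 3/7 is 2/5, so the returned numerator is 2.
assert solution(numerator=3, denominator=7, limit=8) == 2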
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ChineseCLIPProcessor(ProcessorMixin ):
    '''simple docstring'''
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'ChineseCLIPImageProcessor'
    tokenizer_class = ('BertTokenizer', 'BertTokenizerFast')

    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        """simple docstring"""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , FutureWarning , )
            feature_extractor = kwargs.pop("feature_extractor" )

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )

        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor

    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ):
        """simple docstring"""
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none." )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )

    def batch_decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def model_input_names( self ):
        """simple docstring"""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )

    @property
    def feature_extractor_class( self ):
        """simple docstring"""
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , FutureWarning , )
        return self.image_processor_class
| 139 |
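# A hedged usage sketch for the processor above. The checkpoint name is an
# assumption (a public Chinese-CLIP model on the Hub); the processor routes
# text to the tokenizer and images to the image processor, as implemented above.
from PIL import Image
from transformers import ChineseCLIPProcessor

processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
image = Image.open("cat.jpg")  # hypothetical image
batch = processor(text=["一只猫"], images=image, return_tensors="pt")
# batch now holds input_ids / attention_mask plus pixel_values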
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCAmelCase :
'''simple docstring'''
def __init__( self : Union[str, Any] , __lowercase : Union[str, Any] , __lowercase : Optional[Any]=13 , __lowercase : List[Any]=7 , __lowercase : List[str]=True , __lowercase : Optional[Any]=True , __lowercase : Any=True , __lowercase : Optional[int]=True , __lowercase : int=99 , __lowercase : str=24 , __lowercase : Tuple=2 , __lowercase : Union[str, Any]=6 , __lowercase : List[str]=37 , __lowercase : int="gelu" , __lowercase : List[Any]=0.1 , __lowercase : Optional[Any]=0.1 , __lowercase : Any=5_12 , __lowercase : Optional[int]=16 , __lowercase : int=2 , __lowercase : Tuple=0.02 , __lowercase : int=3 , __lowercase : Union[str, Any]=None , __lowercase : List[str]=10_00 , ):
"""simple docstring"""
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = seq_length
snake_case_ = is_training
snake_case_ = use_input_mask
snake_case_ = use_token_type_ids
snake_case_ = use_labels
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = max_position_embeddings
snake_case_ = type_vocab_size
snake_case_ = type_sequence_label_size
snake_case_ = initializer_range
snake_case_ = num_labels
snake_case_ = scope
snake_case_ = range_bbox
def snake_case__ ( self : Union[str, Any] ):
"""simple docstring"""
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
snake_case_ = bbox[i, j, 3]
snake_case_ = bbox[i, j, 1]
snake_case_ = t
if bbox[i, j, 2] < bbox[i, j, 0]:
snake_case_ = bbox[i, j, 2]
snake_case_ = bbox[i, j, 0]
snake_case_ = t
snake_case_ = None
if self.use_input_mask:
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
snake_case_ = None
if self.use_token_type_ids:
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case_ = None
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case_ = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def snake_case__ ( self : str ):
"""simple docstring"""
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def snake_case__ ( self : List[str] , __lowercase : Any , __lowercase : Tuple , __lowercase : str , __lowercase : int , __lowercase : Optional[Any] , __lowercase : Optional[int] , __lowercase : int , ):
"""simple docstring"""
snake_case_ = LiltModel(config=__lowercase )
model.to(__lowercase )
model.eval()
snake_case_ = model(__lowercase , bbox=__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase )
snake_case_ = model(__lowercase , bbox=__lowercase , token_type_ids=__lowercase )
snake_case_ = model(__lowercase , bbox=__lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def snake_case__ ( self : Optional[int] , __lowercase : Dict , __lowercase : int , __lowercase : List[Any] , __lowercase : str , __lowercase : List[str] , __lowercase : Dict , __lowercase : Optional[Any] , ):
"""simple docstring"""
snake_case_ = self.num_labels
snake_case_ = LiltForTokenClassification(config=__lowercase )
model.to(__lowercase )
model.eval()
snake_case_ = model(
__lowercase , bbox=__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case__ ( self : Optional[int] , __lowercase : Union[str, Any] , __lowercase : int , __lowercase : Optional[int] , __lowercase : List[str] , __lowercase : Any , __lowercase : int , __lowercase : Optional[Any] , ):
"""simple docstring"""
snake_case_ = LiltForQuestionAnswering(config=__lowercase )
model.to(__lowercase )
model.eval()
snake_case_ = model(
__lowercase , bbox=__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , start_positions=__lowercase , end_positions=__lowercase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def snake_case__ ( self : Tuple ):
"""simple docstring"""
snake_case_ = self.prepare_config_and_inputs()
(
(
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) ,
) = config_and_inputs
snake_case_ = {
"input_ids": input_ids,
"bbox": bbox,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class UpperCAmelCase ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowerCAmelCase_ = (
{
'''feature-extraction''': LiltModel,
'''question-answering''': LiltForQuestionAnswering,
'''text-classification''': LiltForSequenceClassification,
'''token-classification''': LiltForTokenClassification,
'''zero-shot''': LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def snake_case__ ( self : List[Any] , __lowercase : Dict , __lowercase : Union[str, Any] , __lowercase : List[str] , __lowercase : List[Any] , __lowercase : int ):
"""simple docstring"""
return True
def snake_case__ ( self : List[Any] ):
"""simple docstring"""
snake_case_ = LiltModelTester(self )
snake_case_ = ConfigTester(self , config_class=__lowercase , hidden_size=37 )
def snake_case__ ( self : Dict ):
"""simple docstring"""
self.config_tester.run_common_tests()
def snake_case__ ( self : Optional[Any] ):
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowercase )
def snake_case__ ( self : Dict ):
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
snake_case_ = type
self.model_tester.create_and_check_model(*__lowercase )
def snake_case__ ( self : Any ):
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__lowercase )
def snake_case__ ( self : Dict ):
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__lowercase )
@slow
def snake_case__ ( self : Any ):
"""simple docstring"""
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = LiltModel.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
@require_torch
@slow
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    def test_inference_no_head( self ):
        """simple docstring"""
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base" ).to(torch_device )

        input_ids = torch.tensor([[1, 2]] , device=torch_device )
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=torch_device )

        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids , bbox=bbox )

        expected_shape = torch.Size([1, 2, 768] )
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=torch_device , )
        self.assertEqual(outputs.last_hidden_state.shape , expected_shape )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , expected_slice , atol=1e-3 ) )
| 139 | 1 |
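# A hedged sketch mirroring the integration test above: LiLT takes token
# ids plus one (x0, y0, x1, y1) layout box per token.
import torch
from transformers import LiltModel

model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base")
input_ids = torch.tensor([[1, 2]])
bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]])  # one box per token
with torch.no_grad():
    hidden = model(input_ids=input_ids, bbox=bbox).last_hidden_state  # (1, 2, 768)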
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
SCREAMING_SNAKE_CASE_ = False
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
pass
@nightly
@require_torch_gpu
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self) -> int:
UpperCamelCase = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa)
pipe.to(lowerCamelCase_)
pipe.set_progress_bar_config(disable=lowerCamelCase_)
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''')
UpperCamelCase = torch.manual_seed(0)
UpperCamelCase = pipe.dual_guided(
prompt='''first prompt''' , image=lowerCamelCase_ , text_to_image_strength=0.75 , generator=lowerCamelCase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCamelCase_)
UpperCamelCase = VersatileDiffusionPipeline.from_pretrained(lowerCamelCase_ , torch_dtype=torch.floataa)
pipe.to(lowerCamelCase_)
pipe.set_progress_bar_config(disable=lowerCamelCase_)
UpperCamelCase = generator.manual_seed(0)
UpperCamelCase = pipe.dual_guided(
prompt='''first prompt''' , image=lowerCamelCase_ , text_to_image_strength=0.75 , generator=lowerCamelCase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"
def UpperCAmelCase__ ( self) -> int:
UpperCamelCase = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa)
pipe.to(lowerCamelCase_)
pipe.set_progress_bar_config(disable=lowerCamelCase_)
UpperCamelCase = '''cyberpunk 2077'''
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''')
UpperCamelCase = torch.manual_seed(0)
UpperCamelCase = pipe.dual_guided(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , text_to_image_strength=0.75 , generator=lowerCamelCase_ , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='''numpy''' , ).images
UpperCamelCase = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
UpperCamelCase = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
UpperCamelCase = '''A painting of a squirrel eating a burger '''
UpperCamelCase = torch.manual_seed(0)
UpperCamelCase = pipe.text_to_image(
prompt=lowerCamelCase_ , generator=lowerCamelCase_ , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='''numpy''').images
UpperCamelCase = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
UpperCamelCase = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
UpperCamelCase = pipe.image_variation(lowerCamelCase_ , generator=lowerCamelCase_ , output_type='''numpy''').images
UpperCamelCase = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
UpperCamelCase = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
| 34 |
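# A hedged aside on the seeding pattern the tests above rely on: a fixed
# torch.Generator makes the sampler reproducible, and rewinding the seed
# replays the exact same noise stream.
import torch

generator = torch.Generator(device="cpu").manual_seed(0)
noise_a = torch.randn(4, generator=generator)
generator = generator.manual_seed(0)
noise_b = torch.randn(4, generator=generator)
assert torch.equal(noise_a, noise_b)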
'''simple docstring'''
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits: int = 3 ) -> qiskit.result.counts.Counts:
    if isinstance(number_of_qubits , str ):
        raise TypeError("number of qubits must be a integer." )
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0." )
    if math.floor(number_of_qubits ) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer." )
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10)." )

    qr = QuantumRegister(number_of_qubits , "qr" )
    cr = ClassicalRegister(number_of_qubits , "cr" )

    quantum_circuit = QuantumCircuit(qr , cr )

    counter = number_of_qubits

    for i in range(number_of_qubits ):
        quantum_circuit.h(number_of_qubits - i - 1 )
        counter -= 1
        for j in range(counter ):
            quantum_circuit.cp(np.pi / 2 ** (counter - j) , j , counter )

    for k in range(number_of_qubits // 2 ):
        quantum_circuit.swap(k , number_of_qubits - k - 1 )

    # measure all the qubits
    quantum_circuit.measure(qr , cr )
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator" )
    job = execute(quantum_circuit , backend , shots=1_0_0_0_0 )

    return job.result().get_counts(quantum_circuit )
if __name__ == "__main__":
print(
F'Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}'
)
| 199 | 0 |
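# A hedged sanity check for the circuit above: on the all-|0> input the QFT
# produces a uniform superposition, so 10000 shots should spread roughly
# evenly over all 2**n basis states.
counts = quantum_fourier_transform(3)
assert sum(counts.values()) == 10_000
assert len(counts) == 2**3  # with 10000 shots, all 8 states almost surely appear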
class Graph:
    '''simple docstring'''

    def __init__( self ) -> None:
        self.vertex = {}

    def print_graph( self ) -> None:
        print(self.vertex )
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j ) for j in self.vertex[i]] ) )

    def add_edge( self, from_vertex , to_vertex ) -> None:
        # check if vertex is already present,
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex )
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs( self ) -> None:
        # visited array for storing already visited nodes
        visited = [False] * len(self.vertex )
        # call the recursive helper function
        for i in range(len(self.vertex ) ):
            if not visited[i]:
                self.dfs_recursive(i , visited )

    def dfs_recursive( self , start_vertex , visited ) -> None:
        # mark start vertex as visited
        visited[start_vertex] = True
        print(start_vertex , end=" " )
        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i , visited )


if __name__ == "__main__":
    g = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print('DFS:')
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
| 510 |
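# A hedged alternative to the recursion above: the same traversal with an
# explicit stack, as a standalone sketch.
def dfs_iterative(graph, start):
    visited, order, stack = set(), [], [start]
    while stack:
        vertex = stack.pop()
        if vertex not in visited:
            visited.add(vertex)
            order.append(vertex)
            # push neighbours in reverse so the left-most edge is explored first
            stack.extend(reversed(graph.get(vertex, [])))
    return order

print(dfs_iterative({0: [1, 2], 1: [2], 2: [0, 3], 3: [3]}, 0))  # [0, 1, 2, 3]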
from __future__ import annotations
from math import gcd
def pollard_rho(num: int , seed: int = 2 , step: int = 1 , attempts: int = 3 , ) -> int | None:
'''simple docstring'''
if num < 2:
raise ValueError("The input value cannot be less than 2" )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int , step: int , modulus: int ) -> int:
        return (pow(value , 2 ) + step) % modulus
    for _ in range(attempts ):
        # These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed
while True:
# At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise , step , num )
            hare = rand_fn(hare , step , num )
            hare = rand_fn(hare , step , num )
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise , num )
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
# This is actually what Richard Brent's the "optimized" variant does.
        seed = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
if __name__ == "__main__":
import argparse
    parser = argparse.ArgumentParser()
parser.add_argument(
'num',
type=int,
help='The value to find a divisor of',
)
parser.add_argument(
'--attempts',
type=int,
default=3,
help='The number of attempts before giving up',
)
    args = parser.parse_args()
    divisor = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(f"""{args.num} is probably prime""")
else:
        quotient = args.num // divisor
print(f"""{args.num} = {divisor} * {quotient}""")
| 510 | 1 |
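# A hedged usage example: 8051 = 83 * 97. With the default seed the function
# returns one nontrivial factor; which one depends on the seed and step.
assert pollard_rho(8051) in (83, 97)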
'''simple docstring'''
def solution(n: int = 1_0_0_0 ) -> int:
    product = -1
    candidate = 0
    for a in range(1 , n // 3 ):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product
if __name__ == "__main__":
print(f'''{solution() = }''')
| 502 |
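# Worked example (hedged): for n=12 the only Pythagorean triplet is
# (3, 4, 5), since 3+4+5 == 12 and 3**2 + 4**2 == 5**2, so the product is 60.
assert solution(12) == 60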
'''simple docstring'''
import string
def atbash_slow(sequence: str ) -> str:
    output = ''
    for i in sequence:
        extract = ord(i )
        if 65 <= extract <= 90:
            output += chr(155 - extract )
        elif 97 <= extract <= 122:
            output += chr(219 - extract )
        else:
            output += i
    return output


def atbash(sequence: str ) -> str:
    letters = string.ascii_letters
    letters_reversed = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
    return "".join(
        letters_reversed[letters.index(c )] if c in letters else c for c in sequence )


def benchmark() -> None:
    from timeit import timeit

    print('Running performance benchmarks...' )
    setup = 'from string import printable ; from __main__ import atbash, atbash_slow'
    print(f"> atbash_slow(): {timeit('atbash_slow(printable)' , setup=setup )} seconds" )
    print(f"> atbash(): {timeit('atbash(printable)' , setup=setup )} seconds" )
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(f'''{example} encrypted in atbash: {atbash(example)}''')
benchmark()
| 502 | 1 |
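# A hedged spot check of the mapping above: A<->Z, B<->Y, ..., so "Hello"
# becomes "Svool", and applying the cipher twice returns the input.
assert atbash("Hello") == "Svool"
assert atbash(atbash("with space")) == "with space"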
'''simple docstring'''
import numpy as np
def sigmoid(vector: np.array ) -> np.array:
    return 1 / (1 + np.exp(-vector ))


def gaussian_error_linear_unit(vector: np.array ) -> np.array:
    return vector * sigmoid(1.702 * vector )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 672 |
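# A hedged numeric aside: x * sigmoid(1.702 * x) is the standard sigmoid
# approximation of exact GELU, x * Phi(x); the gap stays on the order of 1e-2.
import math
import numpy as np

x = np.linspace(-3, 3, 7)
exact = x * 0.5 * (1 + np.vectorize(math.erf)(x / math.sqrt(2)))  # x * Phi(x)
approx = gaussian_error_linear_unit(x)
print(np.max(np.abs(exact - approx)))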
'''simple docstring'''
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1 , string2 ) -> str | Literal[False]:
    list1 = list(string1 )
    list2 = list(string2 )
    count = 0
    for i in range(len(list1 ) ):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1 )


def check(binary ) -> list[str]:
    pi = []
    while True:
        checka = ["$"] * len(binary )
        temp = []
        for i in range(len(binary ) ):
            for j in range(i + 1 , len(binary ) ):
                k = compare_string(binary[i] , binary[j] )
                if k is not False:
                    checka[i] = "*"
                    checka[j] = "*"
                    temp.append(k )
        for i in range(len(binary ) ):
            if checka[i] == "$":
                pi.append(binary[i] )
        if len(temp ) == 0:
            return pi
        binary = list(set(temp ) )


def decimal_to_binary(no_of_variable , minterms ) -> list[str]:
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable ):
            string = str(minterm % 2 ) + string
            minterm //= 2
        temp.append(string )
    return temp


def is_for_table(string1 , string2 , count ) -> bool:
    list1 = list(string1 )
    list2 = list(string2 )
    count_n = 0
    for i in range(len(list1 ) ):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart , prime_implicants ) -> list[str]:
    temp = []
    select = [0] * len(chart )
    for i in range(len(chart[0] ) ):
        count = 0
        rem = -1
        for j in range(len(chart ) ):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select ) ):
        if select[i] == 1:
            for j in range(len(chart[0] ) ):
                if chart[i][j] == 1:
                    for k in range(len(chart ) ):
                        chart[k][j] = 0
            temp.append(prime_implicants[i] )
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart ) ):
            count_n = chart[i].count(1 )
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem] )
        for i in range(len(chart[0] ) ):
            if chart[rem][i] == 1:
                for j in range(len(chart ) ):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants , binary ) -> list[list[int]]:
    chart = [[0 for x in range(len(binary ) )] for x in range(len(prime_implicants ) )]
    for i in range(len(prime_implicants ) ):
        count = prime_implicants[i].count("""_""" )
        for j in range(len(binary ) ):
            if is_for_table(prime_implicants[i] , binary[j] , count ):
                chart[i][j] = 1
    return chart


def main() -> None:
    no_of_variable = int(input("""Enter the no. of variables\n""" ) )
    minterms = [
        float(x )
        for x in input(
            """Enter the decimal representation of Minterms 'Spaces Separated'\n""" ).split()
    ]
    binary = decimal_to_binary(no_of_variable , minterms )

    prime_implicants = check(binary )
    print("""Prime Implicants are:""" )
    print(prime_implicants )
    chart = prime_implicant_chart(prime_implicants , binary )

    essential_prime_implicants = selection(chart , prime_implicants )
    print("""Essential Prime Implicants are:""" )
    print(essential_prime_implicants )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 672 | 1 |
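# A hedged standalone illustration of the core Quine-McCluskey step the code
# above implements: two implicants merge only when they differ in one bit.
from typing import Optional

def combine(a: str, b: str) -> Optional[str]:
    diff = [i for i in range(len(a)) if a[i] != b[i]]
    return a[: diff[0]] + "_" + a[diff[0] + 1 :] if len(diff) == 1 else None

print(combine("000", "001"))  # '00_' -> minterms 0 and 1 merge
print(combine("000", "011"))  # None  -> differ in two bits, no merge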
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
__A : Any = logging.get_logger(__name__)
__A : Union[str, Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
__A : Optional[int] = {
"vocab_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/vocab.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/vocab.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/vocab.json",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json",
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"
),
},
"merges_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/merges.txt",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/merges.txt",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/merges.txt",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt",
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"
),
},
"tokenizer_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/tokenizer.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/tokenizer.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json",
"roberta-base-openai-detector": (
"https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"
),
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"
),
},
}
__A : Optional[int] = {
"roberta-base": 512,
"roberta-large": 512,
"roberta-large-mnli": 512,
"distilroberta-base": 512,
"roberta-base-openai-detector": 512,
"roberta-large-openai-detector": 512,
}
class A_ (a_ ):
UpperCAmelCase__ = VOCAB_FILES_NAMES
UpperCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ = ['''input_ids''', '''attention_mask''']
UpperCAmelCase__ = RobertaTokenizer
def __init__( self , _A=None , _A=None , _A=None , _A="replace" , _A="<s>" , _A="</s>" , _A="</s>" , _A="<s>" , _A="<unk>" , _A="<pad>" , _A="<mask>" , _A=False , _A=True , **_A , ):
'''simple docstring'''
super().__init__(
_A , _A , tokenizer_file=_A , errors=_A , bos_token=_A , eos_token=_A , sep_token=_A , cls_token=_A , unk_token=_A , pad_token=_A , mask_token=_A , add_prefix_space=_A , trim_offsets=_A , **_A , )
UpperCAmelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , _A ) != add_prefix_space:
UpperCAmelCase = getattr(_A , pre_tok_state.pop('''type''' ) )
UpperCAmelCase = add_prefix_space
UpperCAmelCase = pre_tok_class(**_A )
UpperCAmelCase = add_prefix_space
UpperCAmelCase = '''post_processor'''
UpperCAmelCase = getattr(self.backend_tokenizer , _A , _A )
if tokenizer_component_instance:
UpperCAmelCase = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
UpperCAmelCase = tuple(state['''sep'''] )
if "cls" in state:
UpperCAmelCase = tuple(state['''cls'''] )
UpperCAmelCase = False
if state.get('''add_prefix_space''' , _A ) != add_prefix_space:
UpperCAmelCase = add_prefix_space
UpperCAmelCase = True
if state.get('''trim_offsets''' , _A ) != trim_offsets:
UpperCAmelCase = trim_offsets
UpperCAmelCase = True
if changes_to_apply:
UpperCAmelCase = getattr(_A , state.pop('''type''' ) )
UpperCAmelCase = component_class(**_A )
setattr(self.backend_tokenizer , _A , _A )
@property
def _lowercase ( self ):
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def _lowercase ( self , _A ):
'''simple docstring'''
UpperCAmelCase = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else value
UpperCAmelCase = value
def _lowercase ( self , *_A , **_A ):
'''simple docstring'''
UpperCAmelCase = kwargs.get('''is_split_into_words''' , _A )
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*_A , **_A )
def _lowercase ( self , *_A , **_A ):
'''simple docstring'''
UpperCAmelCase = kwargs.get('''is_split_into_words''' , _A )
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*_A , **_A )
def _lowercase ( self , _A , _A = None ):
'''simple docstring'''
UpperCAmelCase = self._tokenizer.model.save(_A , name=_A )
return tuple(_A )
def _lowercase ( self , _A , _A=None ):
'''simple docstring'''
UpperCAmelCase = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def _lowercase ( self , _A , _A = None ):
'''simple docstring'''
UpperCAmelCase = [self.sep_token_id]
UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 130 |
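# A hedged usage note for the class above: with pretokenized input the fast
# tokenizer requires add_prefix_space=True, which the assertions above enforce.
from transformers import RobertaTokenizerFast

tokenizer = RobertaTokenizerFast.from_pretrained("roberta-base", add_prefix_space=True)
enc = tokenizer(["Hello", "world"], is_split_into_words=True)
print(enc.input_ids)  # ids with a leading space merged into each word piece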
import math
from collections.abc import Callable
def intersection(function: Callable[[float], float] , x0: float , x1: float ) -> float:
    '''simple docstring'''
    x_n: float = x0
    x_n1: float = x1
    while True:
        if x_n == x_n1 or function(x_n1 ) == function(x_n ):
            raise ZeroDivisionError('''float division by zero, could not find root''' )
        x_n2: float = x_n1 - (
            function(x_n1 ) / ((function(x_n1 ) - function(x_n )) / (x_n1 - x_n ))
        )
        if abs(x_n2 - x_n1 ) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2


def f(x: float ) -> float:
    '''simple docstring'''
    return math.pow(x , 3 ) - (2 * x) - 5
if __name__ == "__main__":
print(intersection(f, 3, 3.5))
| 130 | 1 |
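# A hedged check of the driver above: the real root of x**3 - 2x - 5 is
# approximately 2.0945515, so the secant iteration from (3, 3.5) lands there.
root = intersection(f, 3, 3.5)
assert abs(root - 2.0945515) < 1e-4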
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class a ( lowercase , unittest.TestCase ):
UpperCamelCase : Union[str, Any] = AudioLDMPipeline
UpperCamelCase : str = TEXT_TO_AUDIO_PARAMS
UpperCamelCase : Any = TEXT_TO_AUDIO_BATCH_PARAMS
UpperCamelCase : Union[str, Any] = frozenset(
[
"""num_inference_steps""",
"""num_waveforms_per_prompt""",
"""generator""",
"""latents""",
"""output_type""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
def __snake_case ( self ):
torch.manual_seed(0 )
UpperCAmelCase__ : int = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=(32, 64) , class_embed_type='simple_projection' , projection_class_embeddings_input_dim=32 , class_embeddings_concat=UpperCamelCase_ , )
UpperCAmelCase__ : Any = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=UpperCamelCase_ , set_alpha_to_one=UpperCamelCase_ , )
torch.manual_seed(0 )
UpperCAmelCase__ : Any = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=1 , out_channels=1 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
UpperCAmelCase__ : int = ClapTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , projection_dim=32 , )
UpperCAmelCase__ : List[str] = ClapTextModelWithProjection(UpperCamelCase_ )
UpperCAmelCase__ : Tuple = RobertaTokenizer.from_pretrained('hf-internal-testing/tiny-random-roberta' , model_max_length=77 )
UpperCAmelCase__ : str = SpeechTaHifiGanConfig(
model_in_dim=8 , sampling_rate=16_000 , upsample_initial_channel=16 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=UpperCamelCase_ , )
UpperCAmelCase__ : Dict = SpeechTaHifiGan(UpperCamelCase_ )
UpperCAmelCase__ : Optional[Any] = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'vocoder': vocoder,
}
return components
def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_=0 ):
if str(UpperCamelCase_ ).startswith('mps' ):
UpperCAmelCase__ : List[Any] = torch.manual_seed(UpperCamelCase_ )
else:
UpperCAmelCase__ : str = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
UpperCAmelCase__ : Union[str, Any] = {
'prompt': 'A hammer hitting a wooden surface',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
}
return inputs
def __snake_case ( self ):
UpperCAmelCase__ : Tuple = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase__ : List[Any] = self.get_dummy_components()
UpperCAmelCase__ : Any = AudioLDMPipeline(**UpperCamelCase_ )
UpperCAmelCase__ : Optional[int] = audioldm_pipe.to(UpperCamelCase_ )
audioldm_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
UpperCAmelCase__ : Union[str, Any] = self.get_dummy_inputs(UpperCamelCase_ )
UpperCAmelCase__ : Tuple = audioldm_pipe(**UpperCamelCase_ )
UpperCAmelCase__ : Optional[int] = output.audios[0]
assert audio.ndim == 1
assert len(UpperCamelCase_ ) == 256
UpperCAmelCase__ : List[str] = audio[:10]
UpperCAmelCase__ : List[str] = np.array(
[-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def __snake_case ( self ):
UpperCAmelCase__ : List[Any] = self.get_dummy_components()
UpperCAmelCase__ : Optional[Any] = AudioLDMPipeline(**UpperCamelCase_ )
UpperCAmelCase__ : str = audioldm_pipe.to(UpperCamelCase_ )
UpperCAmelCase__ : List[Any] = audioldm_pipe.to(UpperCamelCase_ )
audioldm_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
UpperCAmelCase__ : Dict = self.get_dummy_inputs(UpperCamelCase_ )
UpperCAmelCase__ : Tuple = 3 * [inputs['prompt']]
# forward
UpperCAmelCase__ : Tuple = audioldm_pipe(**UpperCamelCase_ )
UpperCAmelCase__ : str = output.audios[0]
UpperCAmelCase__ : Any = self.get_dummy_inputs(UpperCamelCase_ )
UpperCAmelCase__ : int = 3 * [inputs.pop('prompt' )]
UpperCAmelCase__ : List[str] = audioldm_pipe.tokenizer(
UpperCamelCase_ , padding='max_length' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=UpperCamelCase_ , return_tensors='pt' , )
UpperCAmelCase__ : List[str] = text_inputs['input_ids'].to(UpperCamelCase_ )
UpperCAmelCase__ : List[Any] = audioldm_pipe.text_encoder(
UpperCamelCase_ , )
UpperCAmelCase__ : str = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
UpperCAmelCase__ : Dict = F.normalize(UpperCamelCase_ , dim=-1 )
UpperCAmelCase__ : str = prompt_embeds
# forward
UpperCAmelCase__ : Dict = audioldm_pipe(**UpperCamelCase_ )
UpperCAmelCase__ : Tuple = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1E-2
    def test_audioldm_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ['this is a negative prompt']
        inputs['negative_prompt'] = negative_prompt
        inputs['prompt'] = 3 * [inputs['prompt']]
        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]
        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop('prompt')]
        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = audioldm_pipe.tokenizer(
                p,
                padding='max_length',
                max_length=audioldm_pipe.tokenizer.model_max_length,
                truncation=True,
                return_tensors='pt',
            )
            text_inputs = text_inputs['input_ids'].to(torch_device)
            text_embeds = audioldm_pipe.text_encoder(text_inputs)
            text_embeds = text_embeds.text_embeds
            # additional L_2 normalization over each hidden-state
            text_embeds = F.normalize(text_embeds, dim=-1)
            embeds.append(text_embeds)
        inputs['prompt_embeds'], inputs['negative_prompt_embeds'] = embeds
        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]
        assert np.abs(audio_1 - audio_2).max() < 1e-2
    def test_audioldm_negative_prompt(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components['scheduler'] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        negative_prompt = 'egg cracking'
        output = audioldm_pipe(**inputs, negative_prompt=negative_prompt)
        audio = output.audios[0]
        assert audio.ndim == 1
        assert len(audio) == 256
        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032])
        assert np.abs(audio_slice - expected_slice).max() < 1e-2
    def test_audioldm_num_waveforms_per_prompt(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components['scheduler'] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        prompt = 'A hammer hitting a wooden surface'
        # test num_waveforms_per_prompt=1 (default)
        audios = audioldm_pipe(prompt, num_inference_steps=2).audios
        assert audios.shape == (1, 256)
        # test num_waveforms_per_prompt=1 (default) for batch of prompts
        batch_size = 2
        audios = audioldm_pipe([prompt] * batch_size, num_inference_steps=2).audios
        assert audios.shape == (batch_size, 256)
        # test num_waveforms_per_prompt for single prompt
        num_waveforms_per_prompt = 2
        audios = audioldm_pipe(prompt, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt).audios
        assert audios.shape == (num_waveforms_per_prompt, 256)
        # test num_waveforms_per_prompt for batch of prompts
        batch_size = 2
        audios = audioldm_pipe(
            [prompt] * batch_size, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt).audios
        assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
    def test_audioldm_audio_length_in_s(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        vocoder_sampling_rate = audioldm_pipe.vocoder.config.sampling_rate
        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(audio_length_in_s=0.016, **inputs)
        audio = output.audios[0]
        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.016
        output = audioldm_pipe(audio_length_in_s=0.032, **inputs)
        audio = output.audios[0]
        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.032
    def test_audioldm_vocoder_model_in_dim(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        prompt = ['hey']
        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        assert audio_shape == (1, 256)
        config = audioldm_pipe.vocoder.config
        config.model_in_dim *= 2
        audioldm_pipe.vocoder = SpeechT5HifiGan(config).to(torch_device)
        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
        assert audio_shape == (1, 256)
    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(test_mean_pixel_difference=False)

    @unittest.skipIf(
        torch_device != 'cuda' or not is_xformers_available(),
        reason='XFormers attention is only available with CUDA and `xformers` installed',
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False)
@slow
class AudioLDMPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 8, 128, 16))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            'prompt': 'A hammer hitting a wooden surface',
            'latents': latents,
            'generator': generator,
            'num_inference_steps': 3,
            'guidance_scale': 2.5,
        }
        return inputs
    def test_audioldm(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained('cvssp/audioldm')
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        inputs['num_inference_steps'] = 25
        audio = audioldm_pipe(**inputs).audios[0]
        assert audio.ndim == 1
        assert len(audio) == 81920
        audio_slice = audio[77230:77240]
        expected_slice = np.array(
            [-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315])
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 1e-2
    def test_audioldm_lms(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained('cvssp/audioldm')
        audioldm_pipe.scheduler = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        audio = audioldm_pipe(**inputs).audios[0]
        assert audio.ndim == 1
        assert len(audio) == 81920
        audio_slice = audio[27780:27790]
        expected_slice = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212])
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 3e-2
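
# A minimal usage sketch of the pipeline exercised by the slow tests above.
# The checkpoint name comes from those tests; the 81920-sample assertion
# matches 5.12 s of audio at the vocoder's 16 kHz sampling rate. Illustrative
# only, not part of the original test file:
#
#   pipe = AudioLDMPipeline.from_pretrained('cvssp/audioldm').to('cuda')
#   audio = pipe('A hammer hitting a wooden surface', num_inference_steps=25).audios[0]
#   assert audio.shape == (81920,)  # 5.12 s * 16000 Hz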
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])
        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])
        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class ConvNextModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextModel,
            ConvNextForImageClassification,
            ConvNextBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason='ConvNext does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='ConvNext does not support input and output embeddings')
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason='ConvNext does not use feedforward chunking')
    def test_feed_forward_chunking(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image


@require_torch
@require_vision
class ConvNextModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained('facebook/convnext-tiny-224') if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextForImageClassification.from_pretrained('facebook/convnext-tiny-224').to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.0260, -0.4739, 0.1911]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))


@require_torch
class ConvNextBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (ConvNextBackbone,) if is_torch_available() else ()
    config_class = ConvNextConfig
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
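
# Sketch of why the backbone test above expects 4x4 feature maps from a 32x32
# input: ConvNext's stem downsamples by 4 and each subsequent stage by a
# further 2, so "stageN" has total stride 4 * 2 ** (N - 1). An illustrative
# reading of the asserted shapes, not part of the original test:
#
#   image_size = 32
#   for n in range(1, 5):
#       stride = 4 * 2 ** (n - 1)
#       print(f'stage{n}:', image_size // stride)  # stage2 -> 4, stage4 -> 1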
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)


class EncodecFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ['input_values', 'padding_mask']

    def __init__(
        self,
        feature_size: int = 1,
        sampling_rate: int = 24000,
        padding_value: float = 0.0,
        chunk_length_s: Optional[float] = None,
        overlap: Optional[float] = None,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap

    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    def __call__(
        self,
        raw_audio,
        padding: Optional[Union[bool, str, PaddingStrategy]] = None,
        truncation: Optional[bool] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f'The model corresponding to this feature extractor: {self} was trained using a sampling rate of'
                    f' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'
                    f' {self.sampling_rate} and not {sampling_rate}.')
        else:
            logger.warning(
                'It is strongly recommended to pass the `sampling_rate` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.')
        if padding and truncation:
            raise ValueError('Both padding and truncation were set. Make sure you only set one.')
        elif padding is None:
            # by default let's pad the inputs
            padding = True
        is_batched = bool(
            isinstance(raw_audio, (list, tuple)) and (isinstance(raw_audio[0], (np.ndarray, tuple, list))))
        if is_batched:
            raw_audio = [np.asarray(audio, dtype=np.float32).T for audio in raw_audio]
        elif not is_batched and not isinstance(raw_audio, np.ndarray):
            raw_audio = np.asarray(raw_audio, dtype=np.float32)
        elif isinstance(raw_audio, np.ndarray) and raw_audio.dtype is np.dtype(np.float64):
            raw_audio = raw_audio.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_audio = [np.asarray(raw_audio).T]
        # verify inputs are valid
        for idx, example in enumerate(raw_audio):
            if example.ndim > 2:
                raise ValueError(f'Expected input shape (channels, length) but got shape {example.shape}')
            if self.feature_size == 1 and example.ndim != 1:
                raise ValueError(f'Expected mono audio but example has {example.shape[-1]} channels')
            if self.feature_size == 2 and example.shape[-1] != 2:
                raise ValueError(f'Expected stereo audio but example has {example.shape[-1]} channels')
        padded_inputs = None
        input_values = BatchFeature({'input_values': raw_audio})
        if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
            if truncation:
                max_length = min(array.shape[0] for array in raw_audio)
                nb_step = int(np.floor(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
            elif padding:
                max_length = max(array.shape[0] for array in raw_audio)
                nb_step = int(np.ceil(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
                padding = 'max_length'
            else:
                padded_inputs = input_values
        # normal padding on batch
        if padded_inputs is None:
            padded_inputs = self.pad(
                input_values, max_length=max_length, truncation=truncation, padding=padding, return_attention_mask=padding)
            if padding:
                padded_inputs['padding_mask'] = padded_inputs.pop('attention_mask')
        input_values = []
        for example in padded_inputs.pop('input_values'):
            if self.feature_size == 1:
                example = example[..., None]
            input_values.append(example.T)
        padded_inputs['input_values'] = input_values
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        return padded_inputs
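
# Sketch of the chunking arithmetic implemented by the `chunk_length` and
# `chunk_stride` properties above, with illustrative values (1 s chunks and
# 25% overlap at 24 kHz; these are examples, not defaults of any checkpoint):
#
#   sampling_rate, chunk_length_s, overlap = 24000, 1.0, 0.25
#   chunk_length = int(chunk_length_s * sampling_rate)          # 24000 samples
#   chunk_stride = max(1, int((1.0 - overlap) * chunk_length))  # 18000 samples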
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class IPNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]
            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]
            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)
        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample
        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample
        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            sample = self.dummy_sample
            residual = 0.1 * sample
            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                kwargs["num_inference_steps"] = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]
            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]
            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 2540529) < 10
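
# Minimal denoising-loop sketch showing how `full_loop` above drives the
# scheduler; the "model" here is a stand-in residual, purely illustrative:
#
#   scheduler = IPNDMScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(10)
#   sample = torch.ones(1, 3, 8, 8)
#   for t in scheduler.timesteps:
#       residual = 0.1 * sample  # stand-in for a real model's prediction
#       sample = scheduler.step(residual, t, sample).prev_sample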
import sys


def matrix_chain_order(array):
    n = len(array)
    matrix = [[0 for _ in range(n)] for _ in range(n)]
    sol = [[0 for _ in range(n)] for _ in range(n)]
    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1
            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optimal_solution(optimal_solution, i, j):
    if i == j:
        print('A' + str(i), end=' ')
    else:
        print('(', end=' ')
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(')', end=' ')


def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)
    print('No. of Operation required: ' + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)


if __name__ == "__main__":
    main()
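
# Worked example: for array = [30, 35, 15, 5, 10, 20, 25] the recurrence
#   matrix[a][b] = min over c of matrix[a][c] + matrix[c + 1][b]
#                  + array[a - 1] * array[c] * array[b]
# yields a minimum of 15125 scalar multiplications, with the optimal
# parenthesization (( A1 ( A2 A3 ) ) ( ( A4 A5 ) A6 )) printed by main().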
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        type_sequence_label_size=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = num_stages

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_backbone_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            num_stages=self.num_stages,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            is_training=self.is_training,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            out_features=self.out_features,
        )

    def get_config(self):
        return UperNetConfig(
            backbone_config=self.get_backbone_config(),
            hidden_size=512,
            pool_scales=[1, 2, 3, 6],
            use_auxiliary_head=True,
            auxiliary_loss_weight=0.4,
            auxiliary_in_channels=40,
            auxiliary_channels=256,
            auxiliary_num_convs=1,
            auxiliary_concat_input=False,
            loss_ignore_index=255,
            num_labels=self.num_labels,
        )
    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {'image-segmentation': UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False

    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @unittest.skip(reason='UperNet does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='UperNet does not support input and output embeddings')
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason='UperNet does not have a base model')
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason='UperNet does not have a base model')
    def test_save_load_fast_init_to_base(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
    def test_model_is_small(self):
        pass
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f'Parameter {name} of model {model_class} seems not properly initialized',
                    )

    @unittest.skip(reason='UperNet does not have tied weights')
    def test_tied_model_weights_key_ignore(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    filepath = hf_hub_download(
        repo_id='hf-internal-testing/fixtures_ade20k', repo_type='dataset', filename='ADE_val_00000001.jpg')
    image = Image.open(filepath).convert('RGB')
    return image


@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained('openmmlab/upernet-swin-tiny')
        model = UperNetForSemanticSegmentation.from_pretrained('openmmlab/upernet-swin-tiny').to(torch_device)
        image = prepare_img()
        inputs = processor(images=image, return_tensors='pt').to(torch_device)
        with torch.no_grad():
            outputs = model(**inputs)
        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained('openmmlab/upernet-convnext-tiny')
        model = UperNetForSemanticSegmentation.from_pretrained('openmmlab/upernet-convnext-tiny').to(torch_device)
        image = prepare_img()
        inputs = processor(images=image, return_tensors='pt').to(torch_device)
        with torch.no_grad():
            outputs = model(**inputs)
        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
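
# Sketch: decoding the logits checked above into a per-pixel label map. The
# argmax over the class dimension is the standard step; illustrative only,
# not part of the original test:
#
#   seg_map = outputs.logits.argmax(dim=1)  # (batch, 512, 512) label ids
#   labels_present = seg_map.unique()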
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8')
modified_files = (
    subprocess.check_output(f'git diff --diff-filter=d --name-only {fork_point_sha}'.split()).decode('utf-8').split()
)
joined_dirs = '|'.join(sys.argv[1:])
regex = re.compile(rf'^({joined_dirs}).*?\.py$')
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(' '.join(relevant_modified_files), end='')
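
# For example, invoking `python ./utils/get_modified_files.py src tests` builds
# the pattern r'^(src|tests).*?\.py$', so 'src/foo/bar.py' is reported while
# 'docs/index.mdx' is not (paths here are hypothetical).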
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
_CITATION = '\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n'
_DESCRIPTION = '\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n'
_KWARGS_DESCRIPTION = '\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n "accuracy": Accuracy\n "f1": F1 score\n "pearson": Pearson Correlation\n "spearmanr": Spearman Correlation\n "matthews_correlation": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})\n {\'pearson\': 1.0, \'spearmanr\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'cola\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n'
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def pearson_and_spearman(preds, labels):
    pearson_corr = float(pearsonr(preds, labels)[0])
    spearman_corr = float(spearmanr(preds, labels)[0])
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Glue(datasets.Metric):
    def _info(self):
        if self.config_name not in [
            "sst2",
            "mnli",
            "mnli_mismatched",
            "mnli_matched",
            "cola",
            "stsb",
            "mrpc",
            "qqp",
            "qnli",
            "rte",
            "wnli",
            "hans",
        ]:
            raise KeyError(
                'You should supply a configuration name selected in '
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]')
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Value('int64' if self.config_name != 'stsb' else 'float32'),
                    'references': datasets.Value('int64' if self.config_name != 'stsb' else 'float32'),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format='numpy',
        )
    def _compute(self, predictions, references):
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions, references)
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                'You should supply a configuration name selected in '
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]')
import math


def solution(n: int = 100) -> int:
    """Returns the difference between the square of the sum and the sum of
    the squares of the first n natural numbers."""
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares


if __name__ == "__main__":
    print(f"{solution() = }")
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class KarrasVeSchedulerState:
    # setable values
    num_inference_steps: Optional[int] = None
    timesteps: Optional[jnp.ndarray] = None
    schedule: Optional[jnp.ndarray] = None  # sigma(t_i)

    @classmethod
    def create(cls):
        return cls()
@dataclass
class FlaxKarrasVeOutput(BaseOutput):
    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: KarrasVeSchedulerState


class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin):
    @property
    def has_state(self):
        return True
    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        pass

    def create_state(self):
        return KarrasVeSchedulerState.create()

    def set_timesteps(
        self, state: KarrasVeSchedulerState, num_inference_steps: int, shape: Tuple = ()
    ) -> KarrasVeSchedulerState:
        timesteps = jnp.arange(0, num_inference_steps)[::-1].copy()
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]
        return state.replace(
            num_inference_steps=num_inference_steps,
            schedule=jnp.array(schedule, dtype=jnp.float32),
            timesteps=timesteps,
        )

    def add_noise_to_input(
        self, state: KarrasVeSchedulerState, sample: jnp.ndarray, sigma: float, key: random.KeyArray
    ) -> Tuple[jnp.ndarray, float]:
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0
        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key, num=1)
        eps = self.config.s_noise * random.normal(key=key, shape=sample.shape)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat

    def step(
        self,
        state: KarrasVeSchedulerState,
        model_output: jnp.ndarray,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: jnp.ndarray,
        return_dict: bool = True,
    ) -> Union[FlaxKarrasVeOutput, Tuple]:
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative
        if not return_dict:
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def step_correct(
        self,
        state: KarrasVeSchedulerState,
        model_output: jnp.ndarray,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: jnp.ndarray,
        sample_prev: jnp.ndarray,
        derivative: jnp.ndarray,
        return_dict: bool = True,
    ) -> Union[FlaxKarrasVeOutput, Tuple]:
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
        if not return_dict:
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def add_noise(self, state: KarrasVeSchedulerState, original_samples, noise, timesteps):
        raise NotImplementedError()
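
# Sketch of the schedule computed in `set_timesteps` above, in plain Python
# with the scheduler's default sigma_min/sigma_max (illustrative only):
#
#   sigma_min, sigma_max, n = 0.02, 100.0, 5
#   sigmas = [sigma_max**2 * (sigma_min**2 / sigma_max**2) ** (i / (n - 1)) for i in range(n)]
#   # geometric interpolation from sigma_max**2 (i = 0) down to sigma_min**2 (i = n - 1)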
from __future__ import annotations

arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expected_nge = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def next_greatest_element_slow(arr: list[float]) -> list[float]:
    """Finds the next greater element for each entry in O(n^2) time."""
    result = []
    arr_size = len(arr)
    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result
def next_greatest_element_fast(arr: list[float]) -> list[float]:
    """Like the slow version, but iterates with enumerate and slicing."""
    result = []
    for i, outer in enumerate(arr):
        next_item: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item)
    return result
def next_greatest_element(arr: list[float]) -> list[float]:
    """Stack-based O(n) solution: scan right-to-left, keeping a stack of
    candidates greater than the current element."""
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size
    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
__UpperCAmelCase = (
'''from __main__ import arr, next_greatest_element_slow, '''
'''next_greatest_element_fast, next_greatest_element'''
)
print(
'''next_greatest_element_slow():''',
timeit('''next_greatest_element_slow(arr)''', setup=setup),
)
print(
'''next_greatest_element_fast():''',
timeit('''next_greatest_element_fast(arr)''', setup=setup),
)
print(
''' next_greatest_element():''',
timeit('''next_greatest_element(arr)''', setup=setup),
)
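
    # Worked example: for [2, 7, 3, 5, 1] all three functions return
    # [7, -1, 5, -1, -1]. The stack variant is O(n): each element is pushed
    # once and popped at most once, versus the O(n^2) nested loops above it.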
import itertools
import random
import unittest

import numpy as np

from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, Wav2Vec2Config, Wav2Vec2FeatureExtractor
from transformers.testing_utils import require_torch, slow

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin

global_rng = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested list."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
class Wav2Vec2FeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
class Wav2Vec2FeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = Wav2Vec2FeatureExtractor

    def setUp(self):
        self.feat_extract_tester = Wav2Vec2FeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))
    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors='np').input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors='np').input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))
        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors='np').input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors='np').input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors='np').input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors='np').input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_zero_mean_unit_variance_normalization_np(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, padding=padding, max_length=max_length, return_tensors="np")
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self.assertTrue(input_values[0][800:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self.assertTrue(input_values[1][1000:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[2][:1200])
    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        lengths = range(800, 1400, 200)
        speech_inputs = [floats_list((1, x))[0] for x in lengths]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]

        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, max_length=max_length, padding=padding)
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self._check_zero_mean_unit_variance(input_values[2][:1200])
    def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="max_length", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1])
        self._check_zero_mean_unit_variance(input_values[2])
    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])
        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=2000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])
        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200))
    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)
    @slow
    @require_torch
    def test_pretrained_checkpoints_are_set_correctly(self):
        # this test makes sure that models that are using
        # group norm don't have their feature extractor return the
        # attention_mask
        for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
            config = Wav2Vec2Config.from_pretrained(model_id)
            feat_extract = Wav2Vec2FeatureExtractor.from_pretrained(model_id)

            # only "layer" feature extraction norm should make use of
            # attention_mask
            self.assertEqual(feat_extract.return_attention_mask, config.feat_extract_norm == "layer")
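# A minimal usage sketch outside the test harness (the checkpoint name and the
# exact call below are assumptions, not taken from the tests above):
#
#   feat_extract = Wav2Vec2FeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
#   batch = feat_extract([raw_audio], sampling_rate=16_000, padding="longest", return_tensors="np")
#   batch.input_values  # zero-mean / unit-variance per sequence when do_normalize=True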
| 40 | 1 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
'''num_images_per_prompt''',
'''num_inference_steps''',
'''generator''',
'''latents''',
'''guidance_scale''',
'''frame_size''',
'''output_type''',
'''return_dict''',
]
SCREAMING_SNAKE_CASE__ : str = False
@property
    def text_embedder_hidden_size(self):
"""simple docstring"""
return 32
@property
    def time_input_dim(self):
"""simple docstring"""
return 32
@property
    def time_embed_dim(self):
"""simple docstring"""
return self.time_input_dim * 4
@property
    def renderer_dim(self):
"""simple docstring"""
return 8
@property
    def dummy_tokenizer(self):
"""simple docstring"""
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
return tokenizer
@property
    def dummy_text_encoder(self):
"""simple docstring"""
torch.manual_seed(0 )
        config = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        return CLIPTextModelWithProjection(config)
@property
    def dummy_prior(self):
"""simple docstring"""
torch.manual_seed(0 )
        model_kwargs = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 16,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 32,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
        model = PriorTransformer(**model_kwargs)
return model
@property
    def dummy_renderer(self):
"""simple docstring"""
torch.manual_seed(0 )
        model_kwargs = {
'''param_shapes''': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 12,
'''background''': (
0.1,
0.1,
0.1,
),
}
        model = ShapERenderer(**model_kwargs)
return model
    def get_dummy_components(self):
"""simple docstring"""
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp", num_train_timesteps=1024, prediction_type="sample", use_karras_sigmas=True, clip_sample=True, clip_sample_range=1.0, )
        components = {
'''prior''': prior,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
'''prompt''': '''horse''',
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 32,
'''output_type''': '''np''',
}
return inputs
    def test_shap_e(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
[
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_inference_batch_consistent(self):
        self._test_inference_batch_consistent(batch_sizes=[1, 2])
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2, test_max_difference=test_max_difference, relax_max_difference=relax_max_difference, )
    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)

        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_shap_e(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_np_out.npy"
        )
        pipe = ShapEPipeline.from_pretrained("openai/shap-e")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)

        images = pipe(
            "a shark",
            generator=generator,
            guidance_scale=15.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image)
| 703 |
def catalan(number: int) -> int:
    """Return the `number`-th Catalan number (1-indexed)."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    current_number = 1
    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number
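# Sanity values for catalan() above (1-indexed, computed from the recurrence
# C(n) = C(n-1) * (4n - 2) / (n + 1)): inputs 1..5 give 1, 1, 2, 5, 14.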
if __name__ == "__main__":
import doctest
doctest.testmod()
| 146 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_upernet": ["UperNetConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_upernet"] = [
        "UperNetForSemanticSegmentation",
        "UperNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 318 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mega"] = [
"MEGA_PRETRAINED_MODEL_ARCHIVE_LIST",
"MegaForCausalLM",
"MegaForMaskedLM",
"MegaForMultipleChoice",
"MegaForQuestionAnswering",
"MegaForSequenceClassification",
"MegaForTokenClassification",
"MegaModel",
"MegaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 567 | 0 |
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
"""simple docstring"""
    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping
def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])
    return None, "\n".join(full_content)
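# Example: a README whose lines are ["---", "license: mit", "---", "Body"]
# splits into ("license: mit", "Body"); a README without a leading "---"
# block returns (None, <full text>).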
class DatasetMetadata(dict):
"""simple docstring"""
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata
@classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
'''simple docstring'''
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
if yaml_string is not None:
return cls.from_yaml_string(lowerCamelCase__ )
else:
return cls()
    def to_readme(self, path: Path):
'''simple docstring'''
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        full_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(full_content)
    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
return full_content
@classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
'''simple docstring'''
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
# Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
(key.replace('-' , '_' ) if key.replace('-' , '_' ) in cls._FIELDS_WITH_DASHES else key): value
for key, value in metadata_dict.items()
}
        return cls(**metadata_dict)
    def to_yaml_string(self) -> str:
'''simple docstring'''
return yaml.safe_dump(
{
(key.replace('_' , '-' ) if key in self._FIELDS_WITH_DASHES else key): value
for key, value in self.items()
            }, sort_keys=False, allow_unicode=True, encoding="utf-8", ).decode("utf-8")
__A = {
"image-classification": [],
"translation": [],
"image-segmentation": [],
"fill-mask": [],
"automatic-speech-recognition": [],
"token-classification": [],
"sentence-similarity": [],
"audio-classification": [],
"question-answering": [],
"summarization": [],
"zero-shot-classification": [],
"table-to-text": [],
"feature-extraction": [],
"other": [],
"multiple-choice": [],
"text-classification": [],
"text-to-image": [],
"text2text-generation": [],
"zero-shot-image-classification": [],
"tabular-classification": [],
"tabular-regression": [],
"image-to-image": [],
"tabular-to-text": [],
"unconditional-image-generation": [],
"text-retrieval": [],
"text-to-speech": [],
"object-detection": [],
"audio-to-audio": [],
"text-generation": [],
"conversational": [],
"table-question-answering": [],
"visual-question-answering": [],
"image-to-text": [],
"reinforcement-learning": [],
"voice-activity-detection": [],
"time-series-forecasting": [],
"document-question-answering": [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
__A = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
ap.add_argument("readme_filepath")
__A = ap.parse_args()
__A = Path(args.readme_filepath)
__A = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
| 711 |
UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
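# Worked example (hypothetical numbers): 1 mol at 300 K in 1 m^3 exerts
# P = nRT / V = 1 * 300 * 8.314462 / 1 ≈ 2494.34 Pa, and feeding that
# pressure back into the volume formula recovers V = 1 m^3.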
if __name__ == "__main__":
from doctest import testmod
testmod()
| 167 | 0 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
SCREAMING_SNAKE_CASE__ = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["DPTFeatureExtractor"]
SCREAMING_SNAKE_CASE__ = ["DPTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_dpt"] = [
"DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DPTForDepthEstimation",
"DPTForSemanticSegmentation",
"DPTModel",
"DPTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 631 |
from __future__ import annotations
import math
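# Newton's forward-difference interpolation, implemented below:
#   f(x) ≈ y0 + u*Δy0 + u(u - 1)/2! * Δ²y0 + ...,  where u = (x - x0) / h
# ucal(u, p) computes the falling product u(u-1)...(u-p+1) for the p-th term.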
def ucal(u: float, p: int) -> float:
    """Falling product u(u-1)...(u-p+1), the coefficient of the p-th forward difference."""
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


def main() -> None:
    n = int(input("enter the number of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(0)  # pre-fill the forward-difference table
    print("enter the values of parameters in a list: ")
    x = list(map(float, input().split()))
    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())
    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)
    print(f"the value at {value} is {summ}")
if __name__ == "__main__":
main()
| 631 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
"tokenization_electra": ["ElectraTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Any = ["ElectraTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_electra"] = [
"ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"ElectraForCausalLM",
"ElectraForMaskedLM",
"ElectraForMultipleChoice",
"ElectraForPreTraining",
"ElectraForQuestionAnswering",
"ElectraForSequenceClassification",
"ElectraForTokenClassification",
"ElectraModel",
"ElectraPreTrainedModel",
"load_tf_weights_in_electra",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_electra"] = [
"TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFElectraForMaskedLM",
"TFElectraForMultipleChoice",
"TFElectraForPreTraining",
"TFElectraForQuestionAnswering",
"TFElectraForSequenceClassification",
"TFElectraForTokenClassification",
"TFElectraModel",
"TFElectraPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_electra"] = [
"FlaxElectraForCausalLM",
"FlaxElectraForMaskedLM",
"FlaxElectraForMultipleChoice",
"FlaxElectraForPreTraining",
"FlaxElectraForQuestionAnswering",
"FlaxElectraForSequenceClassification",
"FlaxElectraForTokenClassification",
"FlaxElectraModel",
"FlaxElectraPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 708 |
'''simple docstring'''
import numpy as np
import qiskit
def bb84(key_len: int = 8, seed: int | None = None) -> str:
'''simple docstring'''
    rng = np.random.default_rng(seed=seed)
    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)
    # Quantum Circuit to simulate BB84
    bbaa_circ = qiskit.QuantumCircuit(num_qubits, name="BB84")
# Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_basis):
        if alice_state[index] == 1:
            bbaa_circ.x(index)
        if alice_basis[index] == 1:
            bbaa_circ.h(index)
bbaa_circ.barrier()
# Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bbaa_circ.h(index)
bbaa_circ.barrier()
bbaa_circ.measure_all()
# Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("aer_simulator")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bbaa_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bbaa_circ).most_frequent()
# Extracting the generated key from the simulation results.
# Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(
                alice_basis, bob_basis, result)
            if alice_basis_bit == bob_basis_bit
        ])
# Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, "0")
return key
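# Note on determinism: both the random basis/state choices (default_rng(seed))
# and the single-shot circuit measurement (seed_simulator) are seeded above,
# so a fixed seed should reproduce the same sifted key, e.g. bb84(8, seed=0).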
if __name__ == "__main__":
    print(f'''The generated key is : {bb84(8, seed=0)}''')
from doctest import testmod
testmod()
| 532 | 0 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
__lowerCamelCase = {"""processing_wav2vec2_with_lm""": ["""Wav2Vec2ProcessorWithLM"""]}
if TYPE_CHECKING:
from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 204 |
import random


class Onepad:
    @staticmethod
    def encrypt(text: str) -> tuple[list[int], list[int]]:
        """Encrypt `text` with a fresh random key per character."""
        plain = [ord(i) for i in text]
        cipher = []
        key = []
        for i in plain:
            k = random.randint(1, 300)
            cipher.append((i + k) * k)
            key.append(k)
        return cipher, key

    @staticmethod
    def decrypt(cipher: list[int], key: list[int]) -> str:
        """Decrypt `cipher` using the matching key stream."""
        plain = []
        for i in range(len(key)):
            plain.append(chr(int((cipher[i] - (key[i]) ** 2) / key[i])))
        return "".join(plain)
if __name__ == "__main__":
    c, k = Onepad().encrypt("Hello")
print(c, k)
print(Onepad().decrypt(c, k))
| 204 | 1 |
"""simple docstring"""
def harmonic_series(n_term: str) -> list:
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series
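# e.g. harmonic_series("5") -> ['1', '1/2', '1/3', '1/4', '1/5']; the argument
# is kept as a string to match the input()-driven usage below.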
if __name__ == "__main__":
__a : Tuple = input('Enter the last number (nth term) of the Harmonic Series')
print('Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n')
print(harmonic_series(nth_term))
| 200 |
"""simple docstring"""
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
'The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion'
)
__a : Union[str, Any] = None
__a : Union[str, Any] = {
'7B': 1_1008,
'13B': 1_3824,
'30B': 1_7920,
'65B': 2_2016,
'70B': 2_8672,
}
__a : List[Any] = {
'7B': 1,
'7Bf': 1,
'13B': 2,
'13Bf': 2,
'30B': 4,
'65B': 8,
'70B': 8,
'70Bf': 8,
}
def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)
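# Worked example: for the 7B model, n = 4096, ffn_dim_multiplier = 1 and
# multiple_of = 256 give int(8 * 4096 / 3) = 10922, rounded up to the next
# multiple of 256 -> 11008, matching the '7B' entry in the table above.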
def read_json(path):
    with open(path, "r") as f:
        return json.load(f)
def write_json(text, path):
    with open(path, "w") as f:
        json.dump(text, f)
def write_model(model_path, input_base_path, model_size, safe_serialization=True):
    os.makedirs(model_path, exist_ok=True)
    tmp_model_path = os.path.join(model_path, "tmp")
    os.makedirs(tmp_model_path, exist_ok=True)

    params = read_json(os.path.join(input_base_path, "params.json"))
    num_shards = NUM_SHARDS[model_size]
    n_layers = params["n_layers"]
    n_heads = params["n_heads"]
    n_heads_per_shard = n_heads // num_shards
    dim = params["dim"]
    dims_per_head = dim // n_heads
    base = 10000.0
    inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head))

    if "n_kv_heads" in params:
        num_key_value_heads = params["n_kv_heads"]  # for GQA / MQA
        num_local_key_value_heads = n_heads_per_shard // num_key_value_heads
        key_value_dim = dim // num_key_value_heads
    else:  # compatibility with other checkpoints
        num_key_value_heads = n_heads
        num_local_key_value_heads = n_heads_per_shard
        key_value_dim = dim

    # permute for sliced rotary
    def permute(w, n_heads=n_heads, dim1=dim, dim2=dim):
        return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)
print(f'Fetching all parameters from the checkpoint at {input_base_path}.')
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
        loaded = torch.load(os.path.join(input_base_path, "consolidated.00.pth"), map_location="cpu")
else:
# Sharded
        loaded = [
            torch.load(os.path.join(input_base_path, f"consolidated.{i:02d}.pth"), map_location="cpu")
            for i in range(num_shards)
        ]
    param_count = 0
    index_dict = {"weight_map": {}}
    for layer_i in range(n_layers):
        filename = f"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"
if model_size == "7B":
# Unsharded
            state_dict = {
f'model.layers.{layer_i}.self_attn.q_proj.weight': permute(
loaded[f'layers.{layer_i}.attention.wq.weight']),
f'model.layers.{layer_i}.self_attn.k_proj.weight': permute(
loaded[f'layers.{layer_i}.attention.wk.weight']),
f'model.layers.{layer_i}.self_attn.v_proj.weight': loaded[f'layers.{layer_i}.attention.wv.weight'],
f'model.layers.{layer_i}.self_attn.o_proj.weight': loaded[f'layers.{layer_i}.attention.wo.weight'],
f'model.layers.{layer_i}.mlp.gate_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w1.weight'],
f'model.layers.{layer_i}.mlp.down_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w2.weight'],
f'model.layers.{layer_i}.mlp.up_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w3.weight'],
f'model.layers.{layer_i}.input_layernorm.weight': loaded[f'layers.{layer_i}.attention_norm.weight'],
f'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[f'layers.{layer_i}.ffn_norm.weight'],
}
else:
# Sharded
# Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
# the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
# redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
            state_dict = {
f'model.layers.{layer_i}.input_layernorm.weight': loaded[0][
f'layers.{layer_i}.attention_norm.weight'
].clone(),
f'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[0][
f'layers.{layer_i}.ffn_norm.weight'
].clone(),
}
a__ = permute(
torch.cat(
[
loaded[i][f'layers.{layer_i}.attention.wq.weight'].view(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
for i in range(lowerCamelCase_)
] , dim=0 , ).reshape(lowerCamelCase_ , lowerCamelCase_))
a__ = permute(
torch.cat(
[
loaded[i][f'layers.{layer_i}.attention.wk.weight'].view(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
for i in range(lowerCamelCase_)
] , dim=0 , ).reshape(lowerCamelCase_ , lowerCamelCase_) , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , )
a__ = torch.cat(
[
loaded[i][f'layers.{layer_i}.attention.wv.weight'].view(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
for i in range(lowerCamelCase_)
] , dim=0 , ).reshape(lowerCamelCase_ , lowerCamelCase_)
a__ = torch.cat(
[loaded[i][f'layers.{layer_i}.attention.wo.weight'] for i in range(lowerCamelCase_)] , dim=1)
a__ = torch.cat(
[loaded[i][f'layers.{layer_i}.feed_forward.w1.weight'] for i in range(lowerCamelCase_)] , dim=0)
a__ = torch.cat(
[loaded[i][f'layers.{layer_i}.feed_forward.w2.weight'] for i in range(lowerCamelCase_)] , dim=1)
a__ = torch.cat(
[loaded[i][f'layers.{layer_i}.feed_forward.w3.weight'] for i in range(lowerCamelCase_)] , dim=0)
a__ = inv_freq
for k, v in state_dict.items():
            index_dict["weight_map"][k] = filename
param_count += v.numel()
        torch.save(state_dict, os.path.join(tmp_model_path, filename))
    filename = f"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"
if model_size == "7B":
# Unsharded
        state_dict = {
'''model.embed_tokens.weight''': loaded['''tok_embeddings.weight'''],
'''model.norm.weight''': loaded['''norm.weight'''],
'''lm_head.weight''': loaded['''output.weight'''],
}
else:
        state_dict = {
'''model.norm.weight''': loaded[0]['''norm.weight'''],
'''model.embed_tokens.weight''': torch.cat(
[loaded[i]['''tok_embeddings.weight'''] for i in range(lowerCamelCase_)] , dim=1),
'''lm_head.weight''': torch.cat([loaded[i]['''output.weight'''] for i in range(lowerCamelCase_)] , dim=0),
}
for k, v in state_dict.items():
        index_dict["weight_map"][k] = filename
param_count += v.numel()
    torch.save(state_dict, os.path.join(tmp_model_path, filename))
# Write configs
    index_dict["metadata"] = {"total_size": param_count * 2}
    write_json(index_dict, os.path.join(tmp_model_path, "pytorch_model.bin.index.json"))
    ffn_dim_multiplier = params["ffn_dim_multiplier"] if "ffn_dim_multiplier" in params else 1
    multiple_of = params["multiple_of"] if "multiple_of" in params else 256
    config = LlamaConfig(
        hidden_size=dim, intermediate_size=compute_intermediate_size(dim, ffn_dim_multiplier, multiple_of), num_attention_heads=params["n_heads"], num_hidden_layers=params["n_layers"], rms_norm_eps=params["norm_eps"], num_key_value_heads=num_key_value_heads, )
    config.save_pretrained(tmp_model_path)
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print('''Loading the checkpoint in a Llama model.''')
    model = LlamaForCausalLM.from_pretrained(tmp_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
# Avoid saving this as part of the config.
del model.config._name_or_path
print('''Saving in the Transformers format.''')
    model.save_pretrained(model_path, safe_serialization=safe_serialization)
    shutil.rmtree(tmp_model_path)
def write_tokenizer(tokenizer_path, input_tokenizer_path):
    # Initialize the tokenizer based on the `spm` model
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(f"Saving a {tokenizer_class.__name__} to {tokenizer_path}.")
    tokenizer = tokenizer_class(input_tokenizer_path)
    tokenizer.save_pretrained(tokenizer_path)
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--input_dir", help="Location of LLaMA weights, which contains tokenizer.model and model folders", )
    parser.add_argument(
        "--model_size", choices=["7B", "7Bf", "13B", "13Bf", "30B", "65B", "70B", "70Bf", "tokenizer_only"], )
    parser.add_argument(
        "--output_dir", help="Location to write HF model and tokenizer", )
    parser.add_argument("--safe_serialization", type=bool, help="Whether or not to save using `safetensors`.")
    args = parser.parse_args()
if args.model_size != "tokenizer_only":
write_model(
model_path=args.output_dir , input_base_path=os.path.join(args.input_dir , args.model_size) , model_size=args.model_size , safe_serialization=args.safe_serialization , )
    spm_path = os.path.join(args.input_dir, "tokenizer.model")
    write_tokenizer(args.output_dir, spm_path)
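# Example invocation (the script filename and paths are placeholders):
#   python convert_llama_weights_to_hf.py \
#       --input_dir /path/to/downloaded/llama --model_size 7B --output_dir /path/to/hf-model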
if __name__ == "__main__":
main()
| 200 | 1 |
def alternative_string_arrange(first_str: str, second_str: str) -> str:
    first_str_length = len(first_str)
    second_str_length = len(second_str)
    # iterate over the longer of the two strings
    abs_length = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list = []
    for char_count in range(abs_length):
        if char_count < first_str_length:
            output_list.append(first_str[char_count])
        if char_count < second_str_length:
            output_list.append(second_str[char_count])
    return "".join(output_list)
if __name__ == "__main__":
print(alternative_string_arrange("AB", "XYZ"), end=" ")
| 622 |
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class Speech2TextProcessor(ProcessorMixin):
    """simple docstring"""

    feature_extractor_class = "Speech2TextFeatureExtractor"
    tokenizer_class = "Speech2TextTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backwards compatibility: inside the `as_target_processor` context
        # manager, forward everything to the current processor.
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call." )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
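# A minimal usage sketch (checkpoint name, `waveform` and `transcript` are
# placeholders / assumptions):
#
#   processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
#   inputs = processor(audio=waveform, sampling_rate=16_000, return_tensors="pt")
#   labels = processor(text=transcript, return_tensors="pt").input_ids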
| 645 | 0 |
'''simple docstring'''
from ....utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
class a ( __lowerCAmelCase ):
"""simple docstring"""
    def __init__(self, config, num_labels=None, modal_hidden_size=2048):
        '''simple docstring'''
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
| 466 |
'''simple docstring'''
import itertools
import math


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10001) -> int:
    return next(itertools.islice(prime_generator(), nth - 1, nth))
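# With the default nth = 10001 this returns 104743, the answer to Project
# Euler problem 7.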
if __name__ == "__main__":
print(F"""{solution() = }""") | 466 | 1 |
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class _a ( snake_case_ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = DownBlockaD # noqa F405
_lowerCamelCase : int = 'down'
def __A ( self : List[str] ):
A_ = [-0.0_232, -0.9_869, 0.8_054, -0.0_637, -0.1_688, -1.4_264, 0.4_470, -1.3_394, 0.0_904]
super().test_output(UpperCAmelCase )
class _a ( snake_case_ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = ResnetDownsampleBlockaD # noqa F405
_lowerCamelCase : str = 'down'
def __A ( self : List[Any] ):
A_ = [0.0_710, 0.2_410, -0.7_320, -1.0_757, -1.1_343, 0.3_540, -0.0_133, -0.2_576, 0.0_948]
super().test_output(UpperCAmelCase )
class _a ( snake_case_ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = AttnDownBlockaD # noqa F405
_lowerCamelCase : str = 'down'
def __A ( self : List[str] ):
A_ = [0.0_636, 0.8_964, -0.6_234, -1.0_131, 0.0_844, 0.4_935, 0.3_437, 0.0_911, -0.2_957]
super().test_output(UpperCAmelCase )
class _a ( snake_case_ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : str = CrossAttnDownBlockaD # noqa F405
_lowerCamelCase : Optional[int] = 'down'
def __A ( self : Union[str, Any] ):
A_ , A_ = super().prepare_init_args_and_inputs_for_common()
A_ = 32
return init_dict, inputs_dict
def __A ( self : Union[str, Any] ):
A_ = [0.2_238, -0.7_396, -0.2_255, -0.3_829, 0.1_925, 1.1_665, 0.0_603, -0.7_295, 0.1_983]
super().test_output(UpperCAmelCase )
class _a ( snake_case_ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : str = SimpleCrossAttnDownBlockaD # noqa F405
_lowerCamelCase : Dict = 'down'
@property
def __A ( self : Optional[Any] ):
return super().get_dummy_input(include_encoder_hidden_states=UpperCAmelCase )
def __A ( self : str ):
A_ , A_ = super().prepare_init_args_and_inputs_for_common()
A_ = 32
return init_dict, inputs_dict
@unittest.skipIf(torch_device == "mps" , "MPS result is not consistent" )
def __A ( self : List[str] ):
A_ = [0.7_921, -0.0_992, -0.1_962, -0.7_695, -0.4_242, 0.7_804, 0.4_737, 0.2_765, 0.3_338]
super().test_output(UpperCAmelCase )
class _a ( snake_case_ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = SkipDownBlockaD # noqa F405
_lowerCamelCase : Optional[Any] = 'down'
@property
def __A ( self : List[Any] ):
return super().get_dummy_input(include_skip_sample=UpperCAmelCase )
def __A ( self : str ):
A_ = [-0.0_845, -0.2_087, -0.2_465, 0.0_971, 0.1_900, -0.0_484, 0.2_664, 0.4_179, 0.5_069]
super().test_output(UpperCAmelCase )
class _a ( snake_case_ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = AttnSkipDownBlockaD # noqa F405
_lowerCamelCase : str = 'down'
@property
def __A ( self : Any ):
return super().get_dummy_input(include_skip_sample=UpperCAmelCase )
def __A ( self : Union[str, Any] ):
A_ = [0.5_539, 0.1_609, 0.4_924, 0.0_537, -0.1_995, 0.4_050, 0.0_979, -0.2_721, -0.0_642]
super().test_output(UpperCAmelCase )
class _a ( snake_case_ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : List[str] = DownEncoderBlockaD # noqa F405
_lowerCamelCase : Optional[Any] = 'down'
@property
def __A ( self : Optional[int] ):
return super().get_dummy_input(include_temb=UpperCAmelCase )
def __A ( self : Optional[int] ):
A_ = {
"in_channels": 32,
"out_channels": 32,
}
A_ = self.dummy_input
return init_dict, inputs_dict
def __A ( self : Dict ):
A_ = [1.1_102, 0.5_302, 0.4_872, -0.0_023, -0.8_042, 0.0_483, -0.3_489, -0.5_632, 0.7_626]
super().test_output(UpperCAmelCase )
class _a ( snake_case_ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : str = AttnDownEncoderBlockaD # noqa F405
_lowerCamelCase : Tuple = 'down'
@property
def __A ( self : Dict ):
return super().get_dummy_input(include_temb=UpperCAmelCase )
def __A ( self : Optional[Any] ):
A_ = {
"in_channels": 32,
"out_channels": 32,
}
A_ = self.dummy_input
return init_dict, inputs_dict
def __A ( self : Optional[Any] ):
A_ = [0.8_966, -0.1_486, 0.8_568, 0.8_141, -0.9_046, -0.1_342, -0.0_972, -0.7_417, 0.1_538]
super().test_output(UpperCAmelCase )
class _a ( snake_case_ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : Dict = UNetMidBlockaD # noqa F405
_lowerCamelCase : int = 'mid'
def __A ( self : List[str] ):
A_ = {
"in_channels": 32,
"temb_channels": 128,
}
A_ = self.dummy_input
return init_dict, inputs_dict
def __A ( self : str ):
A_ = [-0.1_062, 1.7_248, 0.3_494, 1.4_569, -0.0_910, -1.2_421, -0.9_984, 0.6_736, 1.0_028]
super().test_output(UpperCAmelCase )
class _a ( snake_case_ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : str = UNetMidBlockaDCrossAttn # noqa F405
_lowerCamelCase : Union[str, Any] = 'mid'
def __A ( self : Any ):
A_ , A_ = super().prepare_init_args_and_inputs_for_common()
A_ = 32
return init_dict, inputs_dict
def __A ( self : Optional[Any] ):
A_ = [0.0_187, 2.4_220, 0.4_484, 1.1_203, -0.6_121, -1.5_122, -0.8_270, 0.7_851, 1.8_335]
super().test_output(UpperCAmelCase )
class _a ( snake_case_ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = UNetMidBlockaDSimpleCrossAttn # noqa F405
_lowerCamelCase : str = 'mid'
@property
def __A ( self : Union[str, Any] ):
return super().get_dummy_input(include_encoder_hidden_states=UpperCAmelCase )
def __A ( self : Dict ):
A_ , A_ = super().prepare_init_args_and_inputs_for_common()
A_ = 32
return init_dict, inputs_dict
def __A ( self : Optional[int] ):
A_ = [0.7_143, 1.9_974, 0.5_448, 1.3_977, 0.1_282, -1.1_237, -1.4_238, 0.5_530, 0.8_880]
super().test_output(UpperCAmelCase )
class _a ( snake_case_ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : Tuple = UpBlockaD # noqa F405
_lowerCamelCase : Optional[Any] = 'up'
@property
def __A ( self : int ):
return super().get_dummy_input(include_res_hidden_states_tuple=UpperCAmelCase )
def __A ( self : List[str] ):
A_ = [-0.2_041, -0.4_165, -0.3_022, 0.0_041, -0.6_628, -0.7_053, 0.1_928, -0.0_325, 0.0_523]
super().test_output(UpperCAmelCase )
class _a ( snake_case_ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : List[str] = ResnetUpsampleBlockaD # noqa F405
_lowerCamelCase : int = 'up'
@property
def __A ( self : int ):
return super().get_dummy_input(include_res_hidden_states_tuple=UpperCAmelCase )
def __A ( self : Tuple ):
A_ = [0.2_287, 0.3_549, -0.1_346, 0.4_797, -0.1_715, -0.9_649, 0.7_305, -0.5_864, -0.6_244]
super().test_output(UpperCAmelCase )
class _a ( snake_case_ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : str = CrossAttnUpBlockaD # noqa F405
_lowerCamelCase : List[Any] = 'up'
@property
def __A ( self : Union[str, Any] ):
return super().get_dummy_input(include_res_hidden_states_tuple=UpperCAmelCase )
def __A ( self : str ):
A_ , A_ = super().prepare_init_args_and_inputs_for_common()
A_ = 32
return init_dict, inputs_dict
def __A ( self : int ):
A_ = [-0.1_403, -0.3_515, -0.0_420, -0.1_425, 0.3_167, 0.5_094, -0.2_181, 0.5_931, 0.5_582]
super().test_output(UpperCAmelCase )
class _a ( snake_case_ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : List[str] = SimpleCrossAttnUpBlockaD # noqa F405
_lowerCamelCase : int = 'up'
@property
def __A ( self : List[Any] ):
return super().get_dummy_input(include_res_hidden_states_tuple=UpperCAmelCase , include_encoder_hidden_states=UpperCAmelCase )
def __A ( self : List[Any] ):
A_ , A_ = super().prepare_init_args_and_inputs_for_common()
A_ = 32
return init_dict, inputs_dict
def __A ( self : List[Any] ):
A_ = [0.2_645, 0.1_480, 0.0_909, 0.8_044, -0.9_758, -0.9_083, 0.0_994, -1.1_453, -0.7_402]
super().test_output(UpperCAmelCase )
class _a ( snake_case_ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : Tuple = AttnUpBlockaD # noqa F405
_lowerCamelCase : Optional[int] = 'up'
@property
def __A ( self : Any ):
return super().get_dummy_input(include_res_hidden_states_tuple=UpperCAmelCase )
@unittest.skipIf(torch_device == "mps" , "MPS result is not consistent" )
def __A ( self : List[str] ):
A_ = [0.0_979, 0.1_326, 0.0_021, 0.0_659, 0.2_249, 0.0_059, 0.1_132, 0.5_952, 0.1_033]
super().test_output(UpperCAmelCase )
class _a ( snake_case_ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : int = SkipUpBlockaD # noqa F405
_lowerCamelCase : Any = 'up'
@property
def __A ( self : List[Any] ):
return super().get_dummy_input(include_res_hidden_states_tuple=UpperCAmelCase )
def __A ( self : str ):
A_ = [-0.0_893, -0.1_234, -0.1_506, -0.0_332, 0.0_123, -0.0_211, 0.0_566, 0.0_143, 0.0_362]
super().test_output(UpperCAmelCase )
class _a ( snake_case_ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = AttnSkipUpBlockaD # noqa F405
_lowerCamelCase : str = 'up'
@property
def __A ( self : str ):
return super().get_dummy_input(include_res_hidden_states_tuple=UpperCAmelCase )
def __A ( self : int ):
A_ = [0.0_361, 0.0_617, 0.2_787, -0.0_350, 0.0_342, 0.3_421, -0.0_843, 0.0_913, 0.3_015]
super().test_output(UpperCAmelCase )
class _a ( snake_case_ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : Any = UpDecoderBlockaD # noqa F405
_lowerCamelCase : List[str] = 'up'
@property
def __A ( self : Optional[int] ):
return super().get_dummy_input(include_temb=UpperCAmelCase )
def __A ( self : int ):
A_ = {"in_channels": 32, "out_channels": 32}
A_ = self.dummy_input
return init_dict, inputs_dict
def __A ( self : Any ):
A_ = [0.4_404, 0.1_998, -0.9_886, -0.3_320, -0.3_128, -0.7_034, -0.6_955, -0.2_338, -0.3_137]
super().test_output(UpperCAmelCase )
class _a ( snake_case_ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : Any = AttnUpDecoderBlockaD # noqa F405
_lowerCamelCase : str = 'up'
@property
def __A ( self : Optional[int] ):
return super().get_dummy_input(include_temb=UpperCAmelCase )
def __A ( self : Tuple ):
A_ = {"in_channels": 32, "out_channels": 32}
A_ = self.dummy_input
return init_dict, inputs_dict
def __A ( self : Dict ):
A_ = [0.6_738, 0.4_491, 0.1_055, 1.0_710, 0.7_316, 0.3_339, 0.3_352, 0.1_023, 0.3_568]
        super().test_output(UpperCAmelCase )
| 86 |
"""simple docstring"""
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class CTRLModelTester:
    def __init__(self, parent, batch_size=14, seq_length=7, is_training=True, use_token_type_ids=True, use_input_mask=True, use_labels=True, use_mc_token_ids=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.use_mc_token_ids = use_mc_token_ids
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        mc_token_ids = None
        if self.use_mc_token_ids:
            mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
    def get_config(self):
        return CTRLConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
    def create_and_check_ctrl_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLModel(config=config)
        model.to(torch_device)
        model.eval()

        model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(len(result.past_key_values), config.n_layer)
    def create_and_check_lm_head_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()

        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask}

        return config, inputs_dict
    def create_and_check_ctrl_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = CTRLForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
@require_torch
class CTRLModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (CTRLLMHeadModel,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": CTRLModel,
            "text-classification": CTRLForSequenceClassification,
            "text-generation": CTRLLMHeadModel,
            "zero-shot": CTRLForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # The three flag names below are restored best-effort from the upstream test file.
    test_pruning = True
    test_resize_embeddings = False
    test_head_masking = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
            # config could not be created.
            return True

        return False
    def setUp(self):
        self.model_tester = CTRLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CTRLConfig, n_embd=37)

    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_ctrl_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*config_and_inputs)

    def test_ctrl_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CTRLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
@require_torch
class CTRLModelLanguageGenerationTest(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    @slow
    def test_lm_generate_ctrl(self):
        model = CTRLLMHeadModel.from_pretrained("ctrl")
        model.to(torch_device)
        input_ids = torch.tensor(
            [[11859, 0, 1611, 8]], dtype=torch.long, device=torch_device
        )  # Legal the president is
        expected_output_ids = [
            11859,
            0,
            1611,
            8,
            5,
            150,
            26449,
            2,
            19,
            348,
            469,
            3,
            2595,
            48,
            20740,
            246533,
            246533,
            19,
            30,
            5,
        ]  # Legal the president is a good guy and I don't want to lose my job. \n \n I have a

        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
| 607 | 0 |
"""simple docstring"""
def is_pentagonal(n: int) -> bool:
    # n is pentagonal iff (1 + sqrt(1 + 24n)) / 6 is a positive integer
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5000) -> int:
    # Project Euler 44: scan for pentagonal numbers whose sum and difference
    # are both pentagonal, returning the difference found.
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1
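# Worked check (added for illustration): for n = 22, 1 + 24 * 22 = 529 and
# sqrt(529) = 23, so (1 + 23) / 6 = 4 exactly -- 22 is the 4th pentagonal number.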
if __name__ == "__main__":
    print(f"{solution() = }")
| 200 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "caidas/swin2sr-classicalsr-x2-64": (
        "https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"
    ),
}


class Swin2SRConfig(PretrainedConfig):
    model_type = "swin2sr"

    attribute_map = {
        "hidden_size": "embed_dim",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=64,
        patch_size=1,
        num_channels=3,
        embed_dim=180,
        depths=[6, 6, 6, 6, 6, 6],
        num_heads=[6, 6, 6, 6, 6, 6],
        window_size=8,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        upscale=2,
        img_range=1.0,
        resi_connection="1conv",
        upsampler="pixelshuffle",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
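# Minimal usage sketch (not part of the original file):
#   config = Swin2SRConfig(upscale=4)  # e.g. a 4x super-resolution variant
#   config.num_attention_heads         # the per-stage head counts, aliased to num_heads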
| 200 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
a_ = logging.get_logger(__name__) # pylint: disable=invalid-name
a_ = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")\n >>> pipe_prior.to("cuda")\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> zero_image_emb = out.negative_image_embeds\n >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")\n >>> pipe.to("cuda")\n >>> image = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=50,\n ... ).images\n >>> image[0].save("cat.png")\n ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    # Round the target resolution up to the nearest multiple of scale_factor**2,
    # then express it in latent-space units (divided by scale_factor).
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
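# Worked example (added for illustration): with the default scale_factor=8 the
# divisor is 8**2 == 64, so downscale_height_and_width(512, 512) computes
# 512 // 64 == 8 per side and returns (8 * 8, 8 * 8) == (64, 64).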
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
def __init__( self: List[str] , a: UNetaDConditionModel , a: DDPMScheduler , a: VQModel , ) ->Union[str, Any]:
'''simple docstring'''
super().__init__()
self.register_modules(
unet=a , scheduler=a , movq=a , )
a_ = 2 ** (len(self.movq.config.block_out_channels) - 1)
def _lowerCAmelCase ( self: Union[str, Any] , a: str , a: Dict , a: List[str] , a: Optional[Any] , a: Union[str, Any] , a: List[Any]) ->int:
'''simple docstring'''
if latents is None:
a_ = randn_tensor(a , generator=a , device=a , dtype=a)
else:
if latents.shape != shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""")
a_ = latents.to(a)
a_ = latents * scheduler.init_noise_sigma
return latents
def _lowerCAmelCase ( self: List[Any] , a: int=0) ->List[str]:
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`")
a_ = torch.device(f"""cuda:{gpu_id}""")
a_ = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(a , a)
def _lowerCAmelCase ( self: Optional[int] , a: Dict=0) ->Optional[Any]:
'''simple docstring'''
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0"):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
a_ = torch.device(f"""cuda:{gpu_id}""")
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=a)
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
a_ = None
for cpu_offloaded_model in [self.unet, self.movq]:
a_ , a_ = cpu_offload_with_hook(a , a , prev_module_hook=a)
# We'll offload the last model manually.
a_ = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _lowerCAmelCase ( self: Union[str, Any]) ->Optional[int]:
'''simple docstring'''
if not hasattr(self.unet , "_hf_hook"):
return self.device
for module in self.unet.modules():
if (
hasattr(a , "_hf_hook")
and hasattr(module._hf_hook , "execution_device")
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device)
return self.device
@torch.no_grad()
@replace_example_docstring(a)
def __call__( self: Optional[int] , a: Union[torch.FloatTensor, List[torch.FloatTensor]] , a: Union[torch.FloatTensor, List[torch.FloatTensor]] , a: int = 5_12 , a: int = 5_12 , a: int = 1_00 , a: float = 4.0 , a: int = 1 , a: Optional[Union[torch.Generator, List[torch.Generator]]] = None , a: Optional[torch.FloatTensor] = None , a: Optional[str] = "pil" , a: bool = True , ) ->Union[str, Any]:
'''simple docstring'''
a_ = self._execution_device
a_ = guidance_scale > 1.0
if isinstance(a , a):
a_ = torch.cat(a , dim=0)
a_ = image_embeds.shape[0] * num_images_per_prompt
if isinstance(a , a):
a_ = torch.cat(a , dim=0)
if do_classifier_free_guidance:
a_ = image_embeds.repeat_interleave(a , dim=0)
a_ = negative_image_embeds.repeat_interleave(a , dim=0)
a_ = torch.cat([negative_image_embeds, image_embeds] , dim=0).to(dtype=self.unet.dtype , device=a)
self.scheduler.set_timesteps(a , device=a)
a_ = self.scheduler.timesteps
a_ = self.unet.config.in_channels
a_ , a_ = downscale_height_and_width(a , a , self.movq_scale_factor)
# create initial latent
a_ = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , a , a , a , self.scheduler , )
for i, t in enumerate(self.progress_bar(a)):
# expand the latents if we are doing classifier free guidance
a_ = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
a_ = {"image_embeds": image_embeds}
a_ = self.unet(
sample=a , timestep=a , encoder_hidden_states=a , added_cond_kwargs=a , return_dict=a , )[0]
if do_classifier_free_guidance:
a_ , a_ = noise_pred.split(latents.shape[1] , dim=1)
a_ , a_ = noise_pred.chunk(2)
a_ , a_ = variance_pred.chunk(2)
a_ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
a_ = torch.cat([noise_pred, variance_pred_text] , dim=1)
if not (
hasattr(self.scheduler.config , "variance_type")
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
a_ , a_ = noise_pred.split(latents.shape[1] , dim=1)
# compute the previous noisy sample x_t -> x_t-1
a_ = self.scheduler.step(
a , a , a , generator=a , )[0]
# post-processing
a_ = self.movq.decode(a , force_not_quantize=a)["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""")
if output_type in ["np", "pil"]:
a_ = image * 0.5 + 0.5
a_ = image.clamp(0 , 1)
a_ = image.cpu().permute(0 , 2 , 3 , 1).float().numpy()
if output_type == "pil":
a_ = self.numpy_to_pil(a)
if not return_dict:
return (image,)
return ImagePipelineOutput(images=a)
| 685 |
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def _lowerCAmelCase ( self: Optional[int]) ->Dict:
'''simple docstring'''
super().tearDown()
gc.collect()
def _lowerCAmelCase ( self: str) ->Optional[int]:
'''simple docstring'''
a_ , a_ = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-canny" , from_pt=a , dtype=jnp.bfloataa)
a_ , a_ = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , controlnet=a , from_pt=a , dtype=jnp.bfloataa)
a_ = controlnet_params
a_ = "bird"
a_ = jax.device_count()
a_ = pipe.prepare_text_inputs([prompts] * num_samples)
a_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png")
a_ = pipe.prepare_image_inputs([canny_image] * num_samples)
a_ = jax.random.PRNGKey(0)
a_ = jax.random.split(a , jax.device_count())
a_ = replicate(a)
a_ = shard(a)
a_ = shard(a)
a_ = pipe(
prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=50 , jit=a , ).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
a_ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
a_ = images[0, 2_53:2_56, 2_53:2_56, -1]
a_ = jnp.asarray(jax.device_get(image_slice.flatten()))
a_ = jnp.array(
[0.16_7969, 0.11_6699, 0.08_1543, 0.15_4297, 0.13_2812, 0.10_8887, 0.16_9922, 0.16_9922, 0.20_5078])
print(f"""output_slice: {output_slice}""")
assert jnp.abs(output_slice - expected_slice).max() < 1e-2
def _lowerCAmelCase ( self: Union[str, Any]) ->str:
'''simple docstring'''
a_ , a_ = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-openpose" , from_pt=a , dtype=jnp.bfloataa)
a_ , a_ = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , controlnet=a , from_pt=a , dtype=jnp.bfloataa)
a_ = controlnet_params
a_ = "Chef in the kitchen"
a_ = jax.device_count()
a_ = pipe.prepare_text_inputs([prompts] * num_samples)
a_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png")
a_ = pipe.prepare_image_inputs([pose_image] * num_samples)
a_ = jax.random.PRNGKey(0)
a_ = jax.random.split(a , jax.device_count())
a_ = replicate(a)
a_ = shard(a)
a_ = shard(a)
a_ = pipe(
prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=50 , jit=a , ).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
a_ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
a_ = images[0, 2_53:2_56, 2_53:2_56, -1]
a_ = jnp.asarray(jax.device_get(image_slice.flatten()))
a_ = jnp.array(
[[0.27_1484, 0.26_1719, 0.27_5391, 0.27_7344, 0.27_9297, 0.29_1016, 0.29_4922, 0.30_2734, 0.30_2734]])
print(f"""output_slice: {output_slice}""")
assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 685 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
    "tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}


class FalconConfig(PretrainedConfig):
    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=65024,
        hidden_size=4544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        return not self.alibi
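# Minimal usage sketch (not part of the original file):
#   config = FalconConfig()  # hidden_size=4544, num_attention_heads=71
#   config.head_dim          # 4544 // 71 == 64
#   config.rotary            # True, because alibi defaults to False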
| 704 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timesformer import (
            TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimesformerForVideoClassification,
            TimesformerModel,
            TimesformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
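# Descriptive note: assigning a `_LazyModule` to `sys.modules[__name__]` defers
# the heavy torch import until one of the exported names is first accessed;
# static type checkers still see the eager imports via the TYPE_CHECKING branch.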
| 37 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range

        self.use_cache = use_cache

        super().__init__(**kwargs)
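# Minimal usage sketch (not part of the original file):
#   config = CTRLConfig(n_layer=2, n_head=4)  # override any default hyperparameter
#   config.hidden_size                        # 1280, aliased to n_embd via attribute_map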
| 536 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_glpn"] = ["GLPNFeatureExtractor"]
    _import_structure["image_processing_glpn"] = ["GLPNImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_glpn"] = [
        "GLPN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GLPNForDepthEstimation",
        "GLPNLayer",
        "GLPNModel",
        "GLPNPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_glpn import GLPNFeatureExtractor
        from .image_processing_glpn import GLPNImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_glpn import (
            GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
            GLPNForDepthEstimation,
            GLPNLayer,
            GLPNModel,
            GLPNPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 536 | 1 |
"""simple docstring"""
import doctest
from collections import deque
import numpy as np
class CircularConvolution:
    """Circular convolution of two discrete signals via a circulant matrix."""

    def __init__(self) -> None:
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self) -> list[float]:
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)
        max_length = max(length_first_signal, length_second_signal)

        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]

        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)

        # row i of the matrix is the second signal rotated right by i positions
        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item

        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))

        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]
if __name__ == "__main__":
    doctest.testmod()
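# Sanity check (added for illustration): the circular convolution of
# [2, 1, 2, -1] with [1, 2, 3, 4] is [10, 10, 6, 14]; e.g. the first entry is
# 2*1 + 1*4 + 2*3 + (-1)*2 = 10.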
| 719 |
"""simple docstring"""
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _lowercase ( lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@property
def lowerCAmelCase__ ( self )-> int:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def lowerCAmelCase__ ( self )-> Any:
UpperCAmelCase__ : Tuple = ort.SessionOptions()
UpperCAmelCase__ : List[str] = False
return options
def lowerCAmelCase__ ( self )-> List[str]:
UpperCAmelCase__ : Union[str, Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png" )
UpperCAmelCase__ : int = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
UpperCAmelCase__ : str = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting" , revision="onnx" , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
UpperCAmelCase__ : int = "A red cat sitting on a park bench"
UpperCAmelCase__ : Tuple = np.random.RandomState(0 )
UpperCAmelCase__ : Any = pipe(
prompt=__UpperCamelCase , image=__UpperCamelCase , mask_image=__UpperCamelCase , guidance_scale=7.5 , num_inference_steps=10 , generator=__UpperCamelCase , output_type="np" , )
UpperCAmelCase__ : Tuple = output.images
UpperCAmelCase__ : Dict = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
UpperCAmelCase__ : Union[str, Any] = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCAmelCase__ ( self )-> Tuple:
UpperCAmelCase__ : Tuple = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png" )
UpperCAmelCase__ : Tuple = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
UpperCAmelCase__ : Optional[Any] = LMSDiscreteScheduler.from_pretrained(
"runwayml/stable-diffusion-inpainting" , subfolder="scheduler" , revision="onnx" )
UpperCAmelCase__ : Optional[Any] = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting" , revision="onnx" , scheduler=__UpperCamelCase , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
UpperCAmelCase__ : int = "A red cat sitting on a park bench"
UpperCAmelCase__ : List[str] = np.random.RandomState(0 )
UpperCAmelCase__ : str = pipe(
prompt=__UpperCamelCase , image=__UpperCamelCase , mask_image=__UpperCamelCase , guidance_scale=7.5 , num_inference_steps=20 , generator=__UpperCamelCase , output_type="np" , )
UpperCAmelCase__ : List[str] = output.images
UpperCAmelCase__ : List[Any] = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
UpperCAmelCase__ : int = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
| 660 | 0 |
"""simple docstring"""
from math import asin, atan, cos, radians, sin, sqrt, tan
# WGS84 ellipsoid axes and mean radius, in metres
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
RADIUS = 6378137


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """
    Great-circle distance in metres between two points given as
    latitude/longitude pairs in degrees (Haversine formula).
    """
    # Equation parameters
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)
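# Worked check (added for illustration): two equatorial points one degree of
# longitude apart give h_value = sin(radians(0.5)), so the distance is
# 2 * RADIUS * radians(0.5) ~= 111319.5 m, the familiar ~111 km per degree.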
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 695 |
"""simple docstring"""
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = DownBlockaD # noqa F405
__UpperCAmelCase : Any = 'down'
def __UpperCAmelCase ( self ):
__a = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : str = ResnetDownsampleBlockaD # noqa F405
__UpperCAmelCase : List[str] = 'down'
def __UpperCAmelCase ( self ):
__a = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = AttnDownBlockaD # noqa F405
__UpperCAmelCase : Optional[Any] = 'down'
def __UpperCAmelCase ( self ):
__a = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = CrossAttnDownBlockaD # noqa F405
__UpperCAmelCase : Optional[Any] = 'down'
def __UpperCAmelCase ( self ):
__a , __a = super().prepare_init_args_and_inputs_for_common()
__a = 32
return init_dict, inputs_dict
def __UpperCAmelCase ( self ):
__a = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : int = SimpleCrossAttnDownBlockaD # noqa F405
__UpperCAmelCase : Any = 'down'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_encoder_hidden_states=_a )
def __UpperCAmelCase ( self ):
__a , __a = super().prepare_init_args_and_inputs_for_common()
__a = 32
return init_dict, inputs_dict
@unittest.skipIf(torch_device == '''mps''' , '''MPS result is not consistent''' )
def __UpperCAmelCase ( self ):
__a = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : int = SkipDownBlockaD # noqa F405
__UpperCAmelCase : Tuple = 'down'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_skip_sample=_a )
def __UpperCAmelCase ( self ):
__a = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = AttnSkipDownBlockaD # noqa F405
__UpperCAmelCase : Optional[int] = 'down'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_skip_sample=_a )
def __UpperCAmelCase ( self ):
__a = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : int = DownEncoderBlockaD # noqa F405
__UpperCAmelCase : Optional[int] = 'down'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_temb=_a )
def __UpperCAmelCase ( self ):
__a = {
'''in_channels''': 32,
'''out_channels''': 32,
}
__a = self.dummy_input
return init_dict, inputs_dict
def __UpperCAmelCase ( self ):
__a = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = AttnDownEncoderBlockaD # noqa F405
__UpperCAmelCase : Any = 'down'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_temb=_a )
def __UpperCAmelCase ( self ):
__a = {
'''in_channels''': 32,
'''out_channels''': 32,
}
__a = self.dummy_input
return init_dict, inputs_dict
def __UpperCAmelCase ( self ):
__a = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : str = UNetMidBlockaD # noqa F405
__UpperCAmelCase : Any = 'mid'
def __UpperCAmelCase ( self ):
__a = {
'''in_channels''': 32,
'''temb_channels''': 128,
}
__a = self.dummy_input
return init_dict, inputs_dict
def __UpperCAmelCase ( self ):
__a = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : str = UNetMidBlockaDCrossAttn # noqa F405
__UpperCAmelCase : str = 'mid'
def __UpperCAmelCase ( self ):
__a , __a = super().prepare_init_args_and_inputs_for_common()
__a = 32
return init_dict, inputs_dict
def __UpperCAmelCase ( self ):
__a = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Any = UNetMidBlockaDSimpleCrossAttn # noqa F405
__UpperCAmelCase : List[Any] = 'mid'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_encoder_hidden_states=_a )
def __UpperCAmelCase ( self ):
__a , __a = super().prepare_init_args_and_inputs_for_common()
__a = 32
return init_dict, inputs_dict
def __UpperCAmelCase ( self ):
__a = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = UpBlockaD # noqa F405
__UpperCAmelCase : Union[str, Any] = 'up'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=_a )
def __UpperCAmelCase ( self ):
__a = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : str = ResnetUpsampleBlockaD # noqa F405
__UpperCAmelCase : int = 'up'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=_a )
def __UpperCAmelCase ( self ):
__a = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Dict = CrossAttnUpBlockaD # noqa F405
__UpperCAmelCase : List[Any] = 'up'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=_a )
def __UpperCAmelCase ( self ):
__a , __a = super().prepare_init_args_and_inputs_for_common()
__a = 32
return init_dict, inputs_dict
def __UpperCAmelCase ( self ):
__a = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = SimpleCrossAttnUpBlockaD # noqa F405
__UpperCAmelCase : Optional[int] = 'up'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=_a , include_encoder_hidden_states=_a )
def __UpperCAmelCase ( self ):
__a , __a = super().prepare_init_args_and_inputs_for_common()
__a = 32
return init_dict, inputs_dict
def __UpperCAmelCase ( self ):
__a = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Any = AttnUpBlockaD # noqa F405
__UpperCAmelCase : List[Any] = 'up'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=_a )
@unittest.skipIf(torch_device == '''mps''' , '''MPS result is not consistent''' )
def __UpperCAmelCase ( self ):
__a = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Any = SkipUpBlockaD # noqa F405
__UpperCAmelCase : str = 'up'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=_a )
def __UpperCAmelCase ( self ):
__a = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = AttnSkipUpBlockaD # noqa F405
__UpperCAmelCase : int = 'up'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=_a )
def __UpperCAmelCase ( self ):
__a = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = UpDecoderBlockaD # noqa F405
__UpperCAmelCase : List[str] = 'up'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_temb=_a )
def __UpperCAmelCase ( self ):
__a = {'''in_channels''': 32, '''out_channels''': 32}
__a = self.dummy_input
return init_dict, inputs_dict
def __UpperCAmelCase ( self ):
__a = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
super().test_output(_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = AttnUpDecoderBlockaD # noqa F405
__UpperCAmelCase : Any = 'up'
@property
def __UpperCAmelCase ( self ):
return super().get_dummy_input(include_temb=_a )
def __UpperCAmelCase ( self ):
__a = {'''in_channels''': 32, '''out_channels''': 32}
__a = self.dummy_input
return init_dict, inputs_dict
def __UpperCAmelCase ( self ):
__a = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
super().test_output(_a )
| 695 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
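# Usage sketch (hypothetical file path; PipelineTool.__call__ chains
# encode -> forward -> decode under the hood):
#   tool = SpeechToTextTool()
#   transcript = tool("path/to/audio.wav")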
| 282 |
"""simple docstring"""
import gc
import threading
import time
import psutil
import torch
class PeakCPUMemory:
    def __init__(self):
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor(self):
        self.cpu_memory_peak = -1

        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak)

            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start(self):
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()

    def stop(self):
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak


cpu_peak_tracker = PeakCPUMemory()


def start_measure():
    # Time
    measures = {"time": time.time()}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()

    return measures


def end_measure(start_measures):
    # Time
    measures = {"time": time.time() - start_measures["time"]}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20

    return measures


def log_measures(measures, description):
    print(f"{description}:")
    print(f"- Time: {measures['time']:.2f}s")
    for i in range(torch.cuda.device_count()):
        print(f"- GPU {i} allocated: {measures[str(i)]:.2f}MiB")
        peak = measures[f"{i}-peak"]
        print(f"- GPU {i} peak: {peak:.2f}MiB")
    print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB")
    print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB")
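# Usage sketch (assumes a CUDA-capable machine; names as restored above):
#   start = start_measure()
#   ...  # run the workload being profiled
#   log_measures(end_measure(start), "my workload")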
| 282 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range

        self.use_cache = use_cache

        super().__init__(**kwargs)
| 312 |
from math import factorial


def solution(num: int = 100) -> int:
    """Return the sum of the digits of num! (Project Euler 20 asks for num=100)."""
    return sum(int(x) for x in str(factorial(num)))


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
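# Worked check (added for illustration): factorial(10) == 3628800 and
# 3 + 6 + 2 + 8 + 8 + 0 + 0 == 27, so solution(10) == 27.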
| 312 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = '''▁'''
UpperCAmelCase = {'''vocab_file''': '''sentencepiece.bpe.model'''}
UpperCAmelCase = {
'''vocab_file''': {
'''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model''',
}
}
UpperCAmelCase = {
'''facebook/xglm-564M''': 2048,
}
class A_ ( __lowerCamelCase ):
'''simple docstring'''
_UpperCamelCase : Optional[Any] = VOCAB_FILES_NAMES
_UpperCamelCase : Dict = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : Union[str, Any] = ["""input_ids""", """attention_mask"""]
def __init__( self , snake_case , snake_case="<s>" , snake_case="</s>" , snake_case="</s>" , snake_case="<s>" , snake_case="<unk>" , snake_case="<pad>" , snake_case = None , **snake_case , ):
lowercase = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
lowercase = 7
lowercase = [F'''<madeupword{i}>''' for i in range(self.num_madeup_words )]
lowercase = kwargs.get('additional_special_tokens' , [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=snake_case , eos_token=snake_case , unk_token=snake_case , sep_token=snake_case , cls_token=snake_case , pad_token=snake_case , sp_model_kwargs=self.sp_model_kwargs , **snake_case , )
lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(snake_case ) )
lowercase = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
lowercase = 1
# Mimic fairseq token-to-id alignment for the first 4 token
lowercase = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
lowercase = len(self.sp_model )
lowercase = {F'''<madeupword{i}>''': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(snake_case )
lowercase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ):
lowercase = self.__dict__.copy()
lowercase = None
lowercase = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , snake_case ):
lowercase = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
lowercase = {}
lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case = None ):
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
lowercase = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case = None , snake_case = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case , token_ids_a=snake_case , already_has_special_tokens=snake_case )
if token_ids_a is None:
return [1] + ([0] * len(snake_case ))
return [1] + ([0] * len(snake_case )) + [1, 1] + ([0] * len(snake_case ))
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case = None ):
lowercase = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def SCREAMING_SNAKE_CASE__ ( self ):
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = {self.convert_ids_to_tokens(snake_case ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
return self.sp_model.encode(snake_case , out_type=snake_case )
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowercase = self.sp_model.PieceToId(snake_case )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
lowercase = ''.join(snake_case ).replace(snake_case , ' ' ).strip()
return out_string
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case = None ):
if not os.path.isdir(snake_case ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowercase = os.path.join(
snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case , 'wb' ) as fi:
lowercase = self.sp_model.serialized_model_proto()
fi.write(snake_case )
return (out_vocab_file,)
| 565 |
from abc import ABC, abstractmethod
from typing import List, Optional
class A_ ( __lowerCamelCase ):
'''simple docstring'''
def __init__( self ):
# test for the above condition
self.test()
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = 0
lowercase = False
while not completed:
if counter == 1:
self.reset()
lowercase = self.advance()
if not self.does_advance(snake_case ):
raise Exception(
'Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.' )
lowercase , lowercase , lowercase = self.update(snake_case )
counter += 1
if counter > 1_0000:
raise Exception('update() does not fulfill the constraint.' )
if self.remaining() != 0:
raise Exception('Custom Constraint is not defined correctly.' )
@abstractmethod
def SCREAMING_SNAKE_CASE__ ( self ):
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def SCREAMING_SNAKE_CASE__ ( self ):
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def SCREAMING_SNAKE_CASE__ ( self ):
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def SCREAMING_SNAKE_CASE__ ( self , snake_case=False ):
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class A_ ( __lowerCamelCase ):
'''simple docstring'''
def __init__( self , snake_case ):
super(snake_case , self ).__init__()
if not isinstance(snake_case , snake_case ) or len(snake_case ) == 0:
raise ValueError(F'''`token_ids` has to be a non-empty list, but is {token_ids}.''' )
if any((not isinstance(snake_case , snake_case ) or token_id < 0) for token_id in token_ids ):
raise ValueError(F'''Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.''' )
lowercase = token_ids
lowercase = len(self.token_ids )
lowercase = -1 # the index of the currently fulfilled step
lowercase = False
def SCREAMING_SNAKE_CASE__ ( self ):
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
if not isinstance(snake_case , snake_case ):
raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(snake_case )}''' )
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
if not isinstance(snake_case , snake_case ):
raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(snake_case )}''' )
lowercase = False
lowercase = False
lowercase = False
if self.does_advance(snake_case ):
self.fulfilled_idx += 1
lowercase = True
if self.fulfilled_idx == (self.seqlen - 1):
lowercase = True
lowercase = completed
else:
# failed to make progress.
lowercase = True
self.reset()
return stepped, completed, reset
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = False
lowercase = 0
def SCREAMING_SNAKE_CASE__ ( self ):
return self.seqlen - (self.fulfilled_idx + 1)
def SCREAMING_SNAKE_CASE__ ( self , snake_case=False ):
lowercase = PhrasalConstraint(self.token_ids )
if stateful:
lowercase = self.seqlen
lowercase = self.fulfilled_idx
lowercase = self.completed
return new_constraint
class A_ :
'''simple docstring'''
def __init__( self , snake_case , snake_case=True ):
lowercase = max([len(snake_case ) for one in nested_token_ids] )
lowercase = {}
for token_ids in nested_token_ids:
lowercase = root
for tidx, token_id in enumerate(snake_case ):
if token_id not in level:
lowercase = {}
lowercase = level[token_id]
if no_subsets and self.has_subsets(snake_case , snake_case ):
raise ValueError(
'Each list in `nested_token_ids` can\'t be a complete subset of another list, but is'
F''' {nested_token_ids}.''' )
lowercase = root
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
lowercase = self.trie
for current_token in current_seq:
lowercase = start[current_token]
lowercase = list(start.keys() )
return next_tokens
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
lowercase = self.next_tokens(snake_case )
return len(snake_case ) == 0
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
lowercase = list(root.values() )
if len(snake_case ) == 0:
return 1
else:
return sum([self.count_leaves(snake_case ) for nn in next_nodes] )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case ):
lowercase = self.count_leaves(snake_case )
return len(snake_case ) != leaf_count
# `Constraint` is the abstract base class defined earlier in this module.
class DisjunctiveConstraint(Constraint):
    """A special `Constraint` that is fulfilled by fulfilling just one of several token-id sequences."""

    def __init__(self, nested_token_ids):
        super(DisjunctiveConstraint, self).__init__()

        if not isinstance(nested_token_ids, list) or len(nested_token_ids) == 0:
            raise ValueError(f"`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.")
        if any(not isinstance(token_ids, list) for token_ids in nested_token_ids):
            raise ValueError(f"`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.")
        if any(
            any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids)
            for token_ids in nested_token_ids
        ):
            raise ValueError(
                f"Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}."
            )

        self.trie = DisjunctiveTrie(nested_token_ids)
        self.token_ids = nested_token_ids
        self.seqlen = self.trie.max_height
        self.current_seq = []
        self.completed = False

    def advance(self):
        token_list = self.trie.next_tokens(self.current_seq)
        if len(token_list) == 0:
            return None
        else:
            return token_list

    def does_advance(self, token_id):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")
        next_tokens = self.trie.next_tokens(self.current_seq)
        return token_id in next_tokens

    def update(self, token_id):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")

        stepped = False
        completed = False
        reset = False

        if self.does_advance(token_id):
            self.current_seq.append(token_id)
            stepped = True
        else:
            reset = True
            self.reset()

        completed = self.trie.reached_leaf(self.current_seq)
        self.completed = completed
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.current_seq = []

    def remaining(self):
        if self.completed:
            # since this can be completed without reaching max height
            return 0
        else:
            return self.seqlen - len(self.current_seq)

    def copy(self, stateful=False):
        new_constraint = DisjunctiveConstraint(self.token_ids)
        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.current_seq = self.current_seq
            new_constraint.completed = self.completed
        return new_constraint
class ConstraintListState:
    """Tracks the progress of a list of `Constraint` objects as tokens are generated."""

    def __init__(self, constraints):
        self.constraints = constraints
        # max # of steps required to fulfill a given constraint
        self.max_seqlen = max([c.seqlen for c in constraints])
        self.n_constraints = len(constraints)
        self.completed = False
        self.init_state()

    def init_state(self):
        self.complete_constraints = []
        self.inprogress_constraint = None
        self.pending_constraints = [constraint.copy(stateful=False) for constraint in self.constraints]

    def get_bank(self):
        add = 0
        if self.inprogress_constraint:
            # extra points for having a constraint mid-fulfilled
            add += self.max_seqlen - self.inprogress_constraint.remaining()
        return (len(self.complete_constraints) * self.max_seqlen) + add

    def advance(self):
        token_list = []
        if self.inprogress_constraint is None:
            for constraint in self.pending_constraints:  # "pending" == "unfulfilled yet"
                advance = constraint.advance()
                if isinstance(advance, int):
                    token_list.append(advance)
                elif isinstance(advance, list):
                    token_list.extend(advance)
        else:
            advance = self.inprogress_constraint.advance()
            if isinstance(advance, int):
                token_list.append(advance)
            elif isinstance(advance, list):
                token_list.extend(advance)

        if len(token_list) == 0:
            return None
        else:
            return token_list

    def reset(self, token_ids):
        self.init_state()
        if token_ids is not None:
            for token in token_ids:
                # completes or steps **one** constraint
                complete, stepped = self.add(token)
                # the entire list of constraints are fulfilled
                if self.completed:
                    break

    def add(self, token_id):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` should be an `int`, but is `{token_id}`.")

        complete, stepped = False, False

        if self.completed:
            complete = True
            stepped = False
            return complete, stepped

        if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the
            # current job, simply update the state.
            stepped, complete, reset = self.inprogress_constraint.update(token_id)
            if reset:
                # 1. If the next token breaks the progress, then we must restart.
                #    e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
                #    But that doesn't mean we self.init_state(), since we only reset the state for this particular
                #    constraint, not the full list of constraints.
                self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False))
                self.inprogress_constraint = None
            if complete:
                # 2. If the next token completes the constraint, move it to the completed list and set
                #    inprogress to None. If there are no pending constraints either, then this full list of
                #    constraints is complete.
                self.complete_constraints.append(self.inprogress_constraint)
                self.inprogress_constraint = None
                if len(self.pending_constraints) == 0:
                    # we're done!
                    self.completed = True
        else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` help us step towards any of our
            # list of constraints?
            for cidx, pending_constraint in enumerate(self.pending_constraints):
                if pending_constraint.does_advance(token_id):
                    stepped, complete, reset = pending_constraint.update(token_id)
                    if not stepped:
                        raise Exception(
                            "`constraint.update(token_id)` is not yielding incremental progress, "
                            "even though `constraint.does_advance(token_id)` is true."
                        )
                    if complete:
                        self.complete_constraints.append(pending_constraint)
                        self.inprogress_constraint = None
                    if not complete and stepped:
                        self.inprogress_constraint = pending_constraint
                    if complete or stepped:
                        # If we made any progress at all, then it's at least not a "pending constraint".
                        self.pending_constraints = (
                            self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                        )
                        if len(self.pending_constraints) == 0 and self.inprogress_constraint is None:
                            # If there's no longer any pending after this and no inprogress either, then we must be
                            # complete.
                            self.completed = True
                        break  # prevent accidentally stepping through multiple constraints with just one token.
        return complete, stepped

    def copy(self, stateful=True):
        new_state = ConstraintListState(self.constraints)  # we actually never touch self.constraints objects
        # throughout this process, so they are still at their initialization state.
        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True)
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]
        return new_state
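# Illustrative sketch (added; not part of the original module): driving a list of
# constraints token by token. It assumes `PhrasalConstraint.__init__` (defined earlier
# in this module) starts with `fulfilled_idx == -1` and `completed == False`.
#
#     constraints = [PhrasalConstraint([5, 6, 7]), DisjunctiveConstraint([[1, 2], [1, 3]])]
#     state = ConstraintListState(constraints)
#     for token in [1, 3, 5, 6, 7]:
#         complete, stepped = state.add(token)
#     state.completed  # -> True: [1, 3] satisfied the disjunction, [5, 6, 7] the phrase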
| 565 | 1 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self):
        """Flatten the Translation feature into a dictionary."""
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}


@dataclass
class TranslationVariableLanguages:
    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f'Some languages in example ({", ".join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({", ".join(lang_set)}).'
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))

        return {"language": languages, "translation": translations}

    def flatten(self):
        """Flatten the TranslationVariableLanguages feature into a dictionary."""
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
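# Hedged usage sketch (added): encoding one example with the variable-language feature.
# Multiple translations for one language are split into separate tuples and sorted:
#
#     feature = TranslationVariableLanguages(languages=["de", "en", "fr"])
#     feature.encode_example({"en": "the cat", "fr": ["le chat", "la chatte"]})
#     # -> {"language": ("en", "fr", "fr"),
#     #     "translation": ("the cat", "la chatte", "le chat")}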
| 384 |
'''simple docstring'''
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "ResNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "microsoft/resnet-50"
_EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "microsoft/resnet-50"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat"

RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "microsoft/resnet-50",
    # See all resnet models at https://huggingface.co/models?filter=resnet
]
class ResNetConvLayer(nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, activation="relu"):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, input):
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state


class ResNetEmbeddings(nn.Module):
    """ResNet embeddings (stem) composed of a single aggressive convolution."""

    def __init__(self, config):
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act
        )
        self.pooler = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.num_channels = config.num_channels

    def forward(self, pixel_values):
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        embedding = self.embedder(pixel_values)
        embedding = self.pooler(embedding)
        return embedding


class ResNetShortCut(nn.Module):
    """ResNet shortcut, used to project the residual to the correct size; downsamples the input if needed."""

    def __init__(self, in_channels, out_channels, stride=2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input):
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state


class ResNetBasicLayer(nn.Module):
    """A classic ResNet layer composed of two 3x3 convolutions."""

    def __init__(self, in_channels, out_channels, stride=1, activation="relu"):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, out_channels, stride=stride),
            ResNetConvLayer(out_channels, out_channels, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class ResNetBottleNeckLayer(nn.Module):
    """A ResNet bottleneck layer: the first 1x1 convolution reduces channels by `reduction`."""

    def __init__(self, in_channels, out_channels, stride=1, activation="relu", reduction=4):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, reduces_channels, kernel_size=1),
            ResNetConvLayer(reduces_channels, reduces_channels, stride=stride),
            ResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class ResNetStage(nn.Module):
    def __init__(self, config, in_channels, out_channels, stride=2, depth=2):
        super().__init__()
        layer = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels, out_channels, stride=stride, activation=config.hidden_act),
            *[layer(out_channels, out_channels, activation=config.hidden_act) for _ in range(depth - 1)],
        )

    def forward(self, input):
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state


class ResNetEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth))

    def forward(self, hidden_state, output_hidden_states=False, return_dict=True):
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state,
            hidden_states=hidden_states,
        )
class ResNetPreTrainedModel(PreTrainedModel):
    config_class = ResNetConfig
    base_model_prefix = "resnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, ResNetEncoder):
            module.gradient_checkpointing = value
RESNET_START_DOCSTRING = r"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
RESNET_INPUTS_DOCSTRING = r"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare ResNet model outputting raw features without any specific head on top.",
    RESNET_START_DOCSTRING,
)
class ResNetModel(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(self, pixel_values, output_hidden_states=None, return_dict=None):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )


@add_start_docstrings(
    """
    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetForImageClassification(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(self, pixel_values=None, labels=None, output_hidden_states=None, return_dict=None):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)


@add_start_docstrings(
    """
    ResNet backbone, to be used with frameworks like DETR and MaskFormer.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetBackbone(ResNetPreTrainedModel, BackboneMixin):
    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)

        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)

        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, pixel_values, output_hidden_states=None, return_dict=None):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        embedding_output = self.embedder(pixel_values)
        outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)

        hidden_states = outputs.hidden_states

        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output

        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=None,
        )
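# Hedged end-to-end sketch (added): classifying an image through the public transformers
# API rather than this internal module directly; the checkpoint name comes from the
# docstring constants above, and `image` stands for any PIL image you supply.
#
#     from transformers import AutoImageProcessor, ResNetForImageClassification
#     import torch
#
#     processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
#     model = ResNetForImageClassification.from_pretrained("microsoft/resnet-50")
#     inputs = processor(images=image, return_tensors="pt")
#     with torch.no_grad():
#         logits = model(**inputs).logits
#     print(model.config.id2label[logits.argmax(-1).item()])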
| 384 | 1 |
from __future__ import annotations

graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


class Graph:
    def __init__(self, graph, source_vertex):
        """Graph is implemented as a dictionary of adjacency lists."""
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent = {}
        self.source_vertex = source_vertex

    def breath_first_search(self):
        """Run breadth first search from the source vertex, recording each node's parent."""
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex):
        """Return the shortest path from the source to `target_vertex` by walking the parent map."""
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            raise ValueError(msg)
        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"


if __name__ == "__main__":
    g = Graph(graph, "G")
    g.breath_first_search()
    print(g.shortest_path("D"))
    print(g.shortest_path("G"))
    print(g.shortest_path("Foo"))
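    # Note (added): the last lookup raises, since "Foo" is not reachable from "G".
    # A guarded variant of the same call:
    #
    #     try:
    #         print(g.shortest_path("Foo"))
    #     except ValueError as err:
    #         print(err)  # -> No path from vertex: G to vertex: Foo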
| 707 |
import argparse
import os
from pathlib import Path
from typing import Dict

import tensorflow as tf
import torch
from tqdm import tqdm

from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params


PATTERNS = [
    # replace left string with right string to get the relevant state_dict key (identical state dict to bart)
    ["memory_attention", "encoder_attn"],
    ["attention", "attn"],
    ["/", "."],
    [".LayerNorm.gamma", "_layer_norm.weight"],
    [".LayerNorm.beta", "_layer_norm.bias"],
    ["r.layer_", "r.layers."],
    ["output_proj", "out_proj"],
    ["ffn.dense_1.", "fc2."],
    ["ffn.dense.", "fc1."],
    ["ffn_layer_norm", "final_layer_norm"],
    ["kernel", "weight"],
    ["encoder_layer_norm.", "encoder.layer_norm."],
    ["decoder_layer_norm.", "decoder.layer_norm."],
    ["embeddings.weights", "shared.weight"],
]


def rename_state_dict_key(k):
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k


def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model


def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000") -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights


def convert_pegasus_ckpt_to_pytorch(ckpt_path, save_dir):
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)

    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
    convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
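# Usage sketch (added). The checkpoint path below is the script's own default; the output
# directory is hypothetical:
#
#     python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000 ./pegasus/aeslc
#
# or programmatically:
#
#     convert_pegasus_ckpt_to_pytorch("./ckpt/aeslc/model.ckpt-32000", "./pegasus/aeslc")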
| 606 | 0 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
SAMPLE_IMAGE_PROCESSING_CONFIG_DIR = get_tests_dir("fixtures")


class ImageProcessorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        _ = ViTImageProcessor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json"
        )

    def test_image_processor_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants")

        config = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/stable-diffusion-all-variants", subfolder="feature_extractor"
        )
        self.assertIsNotNone(config)


@is_staging_test
class ImageProcessorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-image-processor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-image-processor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-image-processor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="test-image-processor", push_to_hub=True, use_auth_token=self._token
            )

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_in_organization(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("valid_org/test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-image-processor-org", push_to_hub=True, use_auth_token=self._token
            )

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_dynamic_image_processor(self):
        CustomImageProcessor.register_for_auto_class()
        image_processor = CustomImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)

        image_processor.push_to_hub("test-dynamic-image-processor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map,
            {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"},
        )

        new_image_processor = AutoImageProcessor.from_pretrained(
            f"{USER}/test-dynamic-image-processor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__, "CustomImageProcessor")
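# Minimal sketch (added) of the round-trip these tests exercise: push a processor, then
# reload it by repo id. The repo id "my-user/my-image-processor" is hypothetical.
#
#     image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
#     image_processor.push_to_hub("my-user/my-image-processor")
#     reloaded = ViTImageProcessor.from_pretrained("my-user/my-image-processor")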
| 537 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "roberta-base": "https://huggingface.co/roberta-base/resolve/main/vocab.json",
        "roberta-large": "https://huggingface.co/roberta-large/resolve/main/vocab.json",
        "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json",
        "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/vocab.json",
        "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json",
        "roberta-large-openai-detector": (
            "https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"
        ),
    },
    "merges_file": {
        "roberta-base": "https://huggingface.co/roberta-base/resolve/main/merges.txt",
        "roberta-large": "https://huggingface.co/roberta-large/resolve/main/merges.txt",
        "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt",
        "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/merges.txt",
        "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt",
        "roberta-large-openai-detector": (
            "https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"
        ),
    },
    "tokenizer_file": {
        "roberta-base": "https://huggingface.co/roberta-base/resolve/main/tokenizer.json",
        "roberta-large": "https://huggingface.co/roberta-large/resolve/main/tokenizer.json",
        "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json",
        "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json",
        "roberta-base-openai-detector": (
            "https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"
        ),
        "roberta-large-openai-detector": (
            "https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "roberta-base": 512,
    "roberta-large": 512,
    "roberta-large-mnli": 512,
    "distilroberta-base": 512,
    "roberta-base-openai-detector": 512,
    "roberta-large-openai-detector": 512,
}
class RobertaTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = RobertaTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
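# Hedged usage sketch (added): the RoBERTa single- and pair-sequence formats produced by
# `build_inputs_with_special_tokens` above. For "roberta-base", bos_token_id is 0 and
# eos_token_id is 2; the ids 713 and 16 are hypothetical word-piece ids.
#
#     tok = RobertaTokenizerFast.from_pretrained("roberta-base")
#     tok.build_inputs_with_special_tokens([713, 16])    # -> [0, 713, 16, 2]
#     tok.build_inputs_with_special_tokens([713], [16])  # -> [0, 713, 2, 2, 16, 2]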
| 302 | 0 |
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )

    def __post_init__(self):
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                "--config_overrides can't be used in combination with --config_name or --model_name_or_path"
            )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word masking in Chinese."},
    )
    validation_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    validation_split_percentage: Optional[int] = field(
        default=5,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated. Default to the max input length of the model."
            )
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references(dataset, ref_file):
    with open(ref_file, "r", encoding="utf-8") as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)

    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)
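# Illustrative sketch (added): the ref file is JSON lines, one entry per dataset row; to
# the best of my understanding each entry is a JSON list marking the sub-word positions
# that continue a whole word. Hypothetical contents:
#
#     [2, 3]
#     []
#     [1, 4, 5]
#
# i.e. `refs[i]` is attached to row `i` of `dataset` as the new `chinese_ref` column.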
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).
    #
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
        if "validation" not in datasets.keys():
            datasets["validation"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[:{data_args.validation_split_percentage}%]",
            )
            datasets["train"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[{data_args.validation_split_percentage}%:]",
            )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        if extension == "txt":
            extension = "text"
        datasets = load_dataset(extension, data_files=data_files)
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    tokenizer_kwargs = {
        "cache_dir": model_args.cache_dir,
        "use_fast": model_args.use_fast_tokenizer,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported by this script."
            "You can do it from another script, save it, and load it from here, using --tokenizer_name."
        )

    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedLM.from_config(config)

    model.resize_token_embeddings(len(tokenizer))

    # Preprocessing the datasets.
    # First we tokenize all the texts.
    if training_args.do_train:
        column_names = datasets["train"].column_names
    else:
        column_names = datasets["validation"].column_names
    text_column_name = "text" if "text" in column_names else column_names[0]

    padding = "max_length" if data_args.pad_to_max_length else False

    def tokenize_function(examples):
        # Remove empty lines
        examples["text"] = [line for line in examples["text"] if len(line) > 0 and not line.isspace()]
        return tokenizer(examples["text"], padding=padding, truncation=True, max_length=data_args.max_seq_length)

    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        remove_columns=[text_column_name],
        load_from_cache_file=not data_args.overwrite_cache,
    )

    # Add the chinese references if provided
    if data_args.train_ref_file is not None:
        tokenized_datasets["train"] = add_chinese_references(tokenized_datasets["train"], data_args.train_ref_file)
    if data_args.validation_ref_file is not None:
        tokenized_datasets["validation"] = add_chinese_references(
            tokenized_datasets["validation"], data_args.validation_ref_file
        )
    # If we have ref files, need to avoid them being removed by the trainer
    has_ref = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        training_args.remove_unused_columns = False

    # Data collator
    # This one will take care of randomly masking the tokens.
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"] if training_args.do_train else None,
        eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload

        output_train_file = os.path.join(training_args.output_dir, "train_results.txt")
        if trainer.is_world_process_zero():
            with open(output_train_file, "w") as writer:
                logger.info("***** Train results *****")
                for key, value in sorted(train_result.metrics.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        results["perplexity"] = perplexity

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_mlm_wwm.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in sorted(results.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
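# Usage sketch (added); file names and hyper-parameters are hypothetical. All flags map
# directly to fields of the dataclasses defined above:
#
#     python run_mlm_wwm.py \
#         --model_name_or_path bert-base-chinese \
#         --train_file train.txt \
#         --train_ref_file ref.txt \
#         --do_train \
#         --output_dir ./output
#
# Alternatively, a single JSON file can be passed as the only argument; it is read via
# `parser.parse_json_file` with the same field names as keys.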
| 130 |
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, Wav2Vec2Config, Wav2Vec2FeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
_A : Optional[int] = random.Random()
def _a ( UpperCAmelCase , UpperCAmelCase=1.0 , UpperCAmelCase=None , UpperCAmelCase=None ) -> Optional[int]:
"""simple docstring"""
if rng is None:
lowerCamelCase__ : int = global_rng
lowerCamelCase__ : int = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __init__( self : Union[str, Any] , A : Union[str, Any] , A : Any=7 , A : Dict=4_0_0 , A : Optional[int]=2_0_0_0 , A : Dict=1 , A : List[str]=0.0 , A : Optional[int]=1_6_0_0_0 , A : Tuple=True , A : Any=True , ) ->Tuple:
lowerCamelCase__ : List[Any] = parent
lowerCamelCase__ : Optional[Any] = batch_size
lowerCamelCase__ : Optional[int] = min_seq_length
lowerCamelCase__ : int = max_seq_length
lowerCamelCase__ : Optional[Any] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
lowerCamelCase__ : List[str] = feature_size
lowerCamelCase__ : int = padding_value
lowerCamelCase__ : Optional[Any] = sampling_rate
lowerCamelCase__ : Optional[Any] = return_attention_mask
lowerCamelCase__ : Optional[Any] = do_normalize
def __lowerCamelCase ( self : List[Any] ) ->Tuple:
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def __lowerCamelCase ( self : Any , A : Union[str, Any]=False , A : List[Any]=False ) ->Dict:
def _flatten(A : Optional[Any] ):
return list(itertools.chain(*A ) )
if equal_length:
lowerCamelCase__ : List[Any] = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
lowerCamelCase__ : Any = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
lowerCamelCase__ : Optional[Any] = [np.asarray(A ) for x in speech_inputs]
return speech_inputs
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ,unittest.TestCase ):
_UpperCAmelCase : Optional[int] = WavaVecaFeatureExtractor
def __lowerCamelCase ( self : Tuple ) ->Dict:
lowerCamelCase__ : Any = WavaVecaFeatureExtractionTester(self )
def __lowerCamelCase ( self : List[Any] , A : str ) ->List[str]:
self.assertTrue(np.all(np.mean(A , axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(A , axis=0 ) - 1 ) < 1e-3 ) )
def __lowerCamelCase ( self : Union[str, Any] ) ->Any:
# Tests that all calls wrap to encode_plus and batch_encode_plus
lowerCamelCase__ : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
lowerCamelCase__ : int = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
lowerCamelCase__ : str = [np.asarray(A ) for speech_input in speech_inputs]
# Test not batched input
lowerCamelCase__ : List[Any] = feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values
lowerCamelCase__ : Optional[Any] = feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values
self.assertTrue(np.allclose(A , A , atol=1e-3 ) )
# Test batched
lowerCamelCase__ : Tuple = feat_extract(A , return_tensors='''np''' ).input_values
lowerCamelCase__ : List[Any] = feat_extract(A , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(A , A ):
self.assertTrue(np.allclose(A , A , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
lowerCamelCase__ : str = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
lowerCamelCase__ : str = np.asarray(A )
lowerCamelCase__ : Optional[int] = feat_extract(A , return_tensors='''np''' ).input_values
lowerCamelCase__ : Tuple = feat_extract(A , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(A , A ):
self.assertTrue(np.allclose(A , A , atol=1e-3 ) )
def __lowerCamelCase ( self : Optional[Any] ) ->List[Any]:
lowerCamelCase__ : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase__ : Optional[Any] = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
lowerCamelCase__ : List[str] = ['''longest''', '''max_length''', '''do_not_pad''']
lowerCamelCase__ : str = [None, 1_6_0_0, None]
for max_length, padding in zip(A , A ):
lowerCamelCase__ : str = feat_extract(A , padding=A , max_length=A , return_tensors='''np''' )
lowerCamelCase__ : Optional[Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
self.assertTrue(input_values[0][8_0_0:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
self.assertTrue(input_values[1][1_0_0_0:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )
def __lowerCamelCase ( self : int ) ->Dict:
lowerCamelCase__ : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase__ : Any = range(8_0_0 , 1_4_0_0 , 2_0_0 )
lowerCamelCase__ : List[Any] = [floats_list((1, x) )[0] for x in lengths]
lowerCamelCase__ : List[Any] = ['''longest''', '''max_length''', '''do_not_pad''']
lowerCamelCase__ : Dict = [None, 1_6_0_0, None]
for max_length, padding in zip(A , A ):
lowerCamelCase__ : Optional[int] = feat_extract(A , max_length=A , padding=A )
lowerCamelCase__ : Optional[Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )
def __lowerCamelCase ( self : List[Any] ) ->List[str]:
lowerCamelCase__ : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase__ : str = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
lowerCamelCase__ : Union[str, Any] = feat_extract(
A , truncation=A , max_length=1_0_0_0 , padding='''max_length''' , return_tensors='''np''' )
lowerCamelCase__ : Optional[int] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def __lowerCamelCase ( self : Union[str, Any] ) ->List[str]:
lowerCamelCase__ : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase__ : Optional[int] = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
lowerCamelCase__ : Union[str, Any] = feat_extract(
A , truncation=A , max_length=1_0_0_0 , padding='''longest''' , return_tensors='''np''' )
lowerCamelCase__ : List[Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1_0_0_0) )
lowerCamelCase__ : str = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
lowerCamelCase__ : str = feat_extract(
A , truncation=A , max_length=2_0_0_0 , padding='''longest''' , return_tensors='''np''' )
lowerCamelCase__ : int = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1_2_0_0) )
@require_torch
def __lowerCamelCase ( self : Optional[int] ) ->List[Any]:
import torch
lowerCamelCase__ : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase__ : Optional[Any] = np.random.rand(1_0_0 ).astype(np.floataa )
lowerCamelCase__ : Any = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
lowerCamelCase__ : Dict = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''' )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
lowerCamelCase__ : Any = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''' )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
@slow
@require_torch
def __lowerCamelCase ( self : int ) ->Optional[int]:
# this test makes sure that models that are using
# group norm don't have their feature extractor return the
# attention_mask
for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
lowerCamelCase__ : int = WavaVecaConfig.from_pretrained(A )
lowerCamelCase__ : Dict = WavaVecaFeatureExtractor.from_pretrained(A )
# only "layer" feature extraction norm should make use of
# attention_mask
self.assertEqual(feat_extract.return_attention_mask , config.feat_extract_norm == '''layer''' )
| 130 | 1 |
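# A short sketch of the zero-mean / unit-variance normalization that the tests
# above assert on padded Wav2Vec2 inputs; the 1e-7 epsilon is an assumption
# added for numerical safety, not a value read off the feature extractor.
import numpy as np

def zero_mean_unit_var(x: np.ndarray) -> np.ndarray:
    # shift to mean 0, scale to variance 1
    return (x - x.mean()) / np.sqrt(x.var() + 1e-7)

raw = np.random.rand(800).astype(np.float32)
norm = zero_mean_unit_var(raw)
assert abs(norm.mean()) < 1e-3 and abs(norm.var() - 1) < 1e-3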
'''simple docstring'''
def a__ ( lowerCAmelCase__ ) -> Union[str, Any]:
if collection == []:
return []
# get some information about the collection
UpperCAmelCase__ : Tuple = len(UpperCamelCase_ )
UpperCAmelCase__ : List[str] = max(UpperCamelCase_ )
UpperCAmelCase__ : Union[str, Any] = min(UpperCamelCase_ )
# create the counting array
UpperCAmelCase__ : int = coll_max + 1 - coll_min
UpperCAmelCase__ : List[str] = [0] * counting_arr_length
# count how much a number appears in the collection
for number in collection:
counting_arr[number - coll_min] += 1
# sum each position with its predecessors. now, counting_arr[i] tells
# us how many elements <= i there are in the collection
for i in range(1 , UpperCamelCase_ ):
UpperCAmelCase__ : Tuple = counting_arr[i] + counting_arr[i - 1]
# create the output collection
UpperCAmelCase__ : Optional[Any] = [0] * coll_len
# place the elements in the output, respecting the original order (stable
# sort) from end to beginning, updating counting_arr
for i in reversed(range(0 , UpperCamelCase_ ) ):
UpperCAmelCase__ : Any = collection[i]
counting_arr[collection[i] - coll_min] -= 1
return ordered
def a__ ( lowerCAmelCase__ ) -> Tuple:
return "".join([chr(UpperCamelCase_ ) for i in counting_sort([ord(UpperCamelCase_ ) for c in string] )] )
if __name__ == "__main__":
# Test string sort
assert counting_sort_string('''thisisthestring''') == "eghhiiinrsssttt"
UpperCamelCase__ = input('''Enter numbers separated by a comma:\n''').strip()
UpperCamelCase__ = [int(item) for item in user_input.split(''',''')]
print(counting_sort(unsorted))
| 75 |
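# A readable, runnable version of the stable counting sort above, with
# conventional variable names restored.
def counting_sort(collection: list) -> list:
    if not collection:
        return []
    coll_len = len(collection)
    coll_min, coll_max = min(collection), max(collection)
    # counting_arr[i] counts occurrences of the value coll_min + i
    counting_arr = [0] * (coll_max + 1 - coll_min)
    for number in collection:
        counting_arr[number - coll_min] += 1
    # prefix sums: counting_arr[i] becomes the number of elements <= coll_min + i
    for i in range(1, len(counting_arr)):
        counting_arr[i] += counting_arr[i - 1]
    ordered = [0] * coll_len
    # fill from the end so equal elements keep their original order (stability)
    for i in reversed(range(coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1
    return ordered

assert counting_sort([5, 3, 1, 4, 1]) == [1, 1, 3, 4, 5]
assert "".join(chr(i) for i in counting_sort([ord(c) for c in "thisisthestring"])) == "eghhiiinrsssttt"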
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase__ ( _UpperCAmelCase, unittest.TestCase ):
a_ =FunnelTokenizer
a_ =FunnelTokenizerFast
a_ =True
a_ =True
def UpperCAmelCase ( self )-> str:
'''simple docstring'''
super().setUp()
lowerCAmelCase__ = [
"<unk>",
"<cls>",
"<sep>",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
lowerCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def UpperCAmelCase ( self , **__UpperCAmelCase )-> Any:
'''simple docstring'''
return FunnelTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
def UpperCAmelCase ( self , **__UpperCAmelCase )-> Union[str, Any]:
'''simple docstring'''
return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase )-> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase__ = "UNwant\u00E9d,running"
lowerCAmelCase__ = "unwanted, running"
return input_text, output_text
def UpperCAmelCase ( self )-> int:
'''simple docstring'''
lowerCAmelCase__ = self.tokenizer_class(self.vocab_file )
lowerCAmelCase__ = tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(__UpperCAmelCase , ["un", "##want", "##ed", ",", "runn", "##ing"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , [7, 4, 5, 10, 8, 9] )
def UpperCAmelCase ( self )-> str:
'''simple docstring'''
lowerCAmelCase__ = self.get_tokenizers(do_lower_case=__UpperCAmelCase )
for tokenizer in tokenizers:
lowerCAmelCase__ = tokenizer("UNwant\u00E9d,running" )
lowerCAmelCase__ = len(inputs["input_ids"] ) - 1
self.assertListEqual(inputs["token_type_ids"] , [2] + [0] * sentence_len )
lowerCAmelCase__ = tokenizer("UNwant\u00E9d,running" , "UNwant\u00E9d,running" )
self.assertListEqual(inputs["token_type_ids"] , [2] + [0] * sentence_len + [1] * sentence_len )
| 339 | 0 |
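# A small sketch of the behaviour the last test above checks: Funnel marks its
# <cls> token with token type id 2 rather than 0. The checkpoint name is an
# illustrative assumption.
from transformers import FunnelTokenizerFast

tok = FunnelTokenizerFast.from_pretrained("funnel-transformer/small")
enc = tok("hello world")
assert enc["token_type_ids"][0] == 2          # the <cls> slot
assert set(enc["token_type_ids"][1:]) == {0}  # the first segment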
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class SCREAMING_SNAKE_CASE__ ( lowercase__ , lowercase__ , unittest.TestCase ):
snake_case__ : List[str] = IFPipeline
snake_case__ : Union[str, Any] = TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''}
snake_case__ : int = TEXT_TO_IMAGE_BATCH_PARAMS
snake_case__ : Optional[int] = PipelineTesterMixin.required_optional_params - {'''latents'''}
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
return self._get_dummy_components()
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Tuple=0 ) -> int:
if str(SCREAMING_SNAKE_CASE__ ).startswith('mps' ):
a_ : Tuple = torch.manual_seed(SCREAMING_SNAKE_CASE__ )
else:
a_ : int = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ )
a_ : Any = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Any:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Any:
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def SCREAMING_SNAKE_CASE ( self : int ) -> Any:
self._test_save_load_local()
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> str:
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
# text-to-image
a_ : Any = IFPipeline.from_pretrained('DeepFloyd/IF-I-XL-v1.0' , variant='fp16' , torch_dtype=torch.floataa )
a_ : Any = IFSuperResolutionPipeline.from_pretrained(
'DeepFloyd/IF-II-L-v1.0' , variant='fp16' , torch_dtype=torch.floataa , text_encoder=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to('cuda' )
a_ , a_ : Union[str, Any] = pipe_a.encode_prompt('anime turtle' , device='cuda' )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
a_ : Any = None
a_ : List[str] = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
a_ : Union[str, Any] = IFImgaImgPipeline(**pipe_a.components )
a_ : List[Any] = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
a_ : Dict = IFInpaintingPipeline(**pipe_a.components )
a_ : Any = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict ) -> Union[str, Any]:
# pipeline 1
_start_torch_memory_measurement()
a_ : List[Any] = torch.Generator(device='cpu' ).manual_seed(0 )
a_ : Optional[Any] = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , generator=SCREAMING_SNAKE_CASE__ , output_type='np' , )
a_ : Any = output.images[0]
assert image.shape == (6_4, 6_4, 3)
a_ : Dict = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_3 * 1_0**9
a_ : List[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy' )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# pipeline 2
_start_torch_memory_measurement()
a_ : str = torch.Generator(device='cpu' ).manual_seed(0 )
a_ : int = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
a_ : Union[str, Any] = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , output_type='np' , )
a_ : str = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
a_ : Dict = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
a_ : Optional[int] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy' )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : List[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> List[Any]:
# pipeline 1
_start_torch_memory_measurement()
a_ : Union[str, Any] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
a_ : Optional[Any] = torch.Generator(device='cpu' ).manual_seed(0 )
a_ : Tuple = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , generator=SCREAMING_SNAKE_CASE__ , output_type='np' , )
a_ : Any = output.images[0]
assert image.shape == (6_4, 6_4, 3)
a_ : Union[str, Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_0 * 1_0**9
a_ : int = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy' )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# pipeline 2
_start_torch_memory_measurement()
a_ : Union[str, Any] = torch.Generator(device='cpu' ).manual_seed(0 )
a_ : Any = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
a_ : List[str] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
a_ : List[str] = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , original_image=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , output_type='np' , )
a_ : List[str] = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
a_ : Dict = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
a_ : List[str] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy' )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Tuple ) -> Optional[Any]:
# pipeline 1
_start_torch_memory_measurement()
a_ : List[str] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
a_ : Dict = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(1 ) ).to(SCREAMING_SNAKE_CASE__ )
a_ : Dict = torch.Generator(device='cpu' ).manual_seed(0 )
a_ : Tuple = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , mask_image=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , generator=SCREAMING_SNAKE_CASE__ , output_type='np' , )
a_ : Union[str, Any] = output.images[0]
assert image.shape == (6_4, 6_4, 3)
a_ : Union[str, Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_0 * 1_0**9
a_ : Dict = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy' )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# pipeline 2
_start_torch_memory_measurement()
a_ : Dict = torch.Generator(device='cpu' ).manual_seed(0 )
a_ : Dict = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
a_ : List[Any] = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
a_ : Dict = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(1 ) ).to(SCREAMING_SNAKE_CASE__ )
a_ : Dict = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , mask_image=SCREAMING_SNAKE_CASE__ , original_image=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , output_type='np' , )
a_ : Any = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
a_ : List[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
a_ : List[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy' )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( ) -> List[Any]:
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
| 702 |
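# A hedged sketch of the memory-saving pattern the slow test above exercises:
# encode the prompt once, drop the T5 text encoder, then run stage I (base)
# and stage II (super-resolution) with CPU offload. Requires a CUDA GPU; the
# tiny step count is only for a smoke run.
import torch
from diffusers import IFPipeline, IFSuperResolutionPipeline

pipe_1 = IFPipeline.from_pretrained(
    "DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16
)
pipe_1.enable_model_cpu_offload()
prompt_embeds, negative_embeds = pipe_1.encode_prompt("anime turtle")
pipe_1.text_encoder = None  # the embeddings are cached, so free the T5 encoder

image = pipe_1(
    prompt_embeds=prompt_embeds,
    negative_prompt_embeds=negative_embeds,
    num_inference_steps=2,
    output_type="pt",
).images

pipe_2 = IFSuperResolutionPipeline.from_pretrained(
    "DeepFloyd/IF-II-L-v1.0",
    variant="fp16",
    torch_dtype=torch.float16,
    text_encoder=None,  # stage II reuses the precomputed embeddings
    tokenizer=None,
)
pipe_2.enable_model_cpu_offload()
upscaled = pipe_2(
    image=image,
    prompt_embeds=prompt_embeds,
    negative_prompt_embeds=negative_embeds,
    num_inference_steps=2,
    output_type="np",
).images[0]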
from sklearn.metrics import fa_score
import datasets
UpperCAmelCase_ : List[Any] = '\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n'
UpperCAmelCase_ : Optional[Any] = '\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n\n - \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights. Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {\'f1\': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results[\'f1\'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results[\'f1\'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")\n >>> print(round(results[\'f1\'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {\'f1\': array([0.8, 0. , 0. ])}\n'
UpperCAmelCase_ : Optional[Any] = '\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
def SCREAMING_SNAKE_CASE ( self : int ) -> int:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('int32' ) ),
'references': datasets.Sequence(datasets.Value('int32' ) ),
}
if self.config_name == 'multilabel'
else {
'predictions': datasets.Value('int32' ),
'references': datasets.Value('int32' ),
} ) , reference_urls=['https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'] , )
def SCREAMING_SNAKE_CASE ( self : Dict , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[Any]=None , SCREAMING_SNAKE_CASE__ : List[str]=1 , SCREAMING_SNAKE_CASE__ : Union[str, Any]="binary" , SCREAMING_SNAKE_CASE__ : Dict=None ) -> Any:
a_ : List[Any] = fa_score(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ , pos_label=SCREAMING_SNAKE_CASE__ , average=SCREAMING_SNAKE_CASE__ , sample_weight=SCREAMING_SNAKE_CASE__ )
return {"f1": float(SCREAMING_SNAKE_CASE__ ) if score.size == 1 else score}
| 443 | 0 |
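# The metric above is a thin wrapper around scikit-learn; the call below
# reproduces Example 1 from the docstring directly.
from sklearn.metrics import f1_score

refs = [0, 1, 0, 1, 0]
preds = [0, 0, 1, 1, 0]
print(f1_score(refs, preds))  # 0.5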
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A : Any = {
'configuration_clap': [
'CLAP_PRETRAINED_MODEL_ARCHIVE_LIST',
'ClapAudioConfig',
'ClapConfig',
'ClapTextConfig',
],
'processing_clap': ['ClapProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : int = [
'CLAP_PRETRAINED_MODEL_ARCHIVE_LIST',
'ClapModel',
'ClapPreTrainedModel',
'ClapTextModel',
'ClapTextModelWithProjection',
'ClapAudioModel',
'ClapAudioModelWithProjection',
]
A : Optional[int] = ['ClapFeatureExtractor']
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
A : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 516 |
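# A minimal sketch of the lazy-import pattern the __init__ above uses, for a
# hypothetical package with one submodule; it is meant to live in a package's
# __init__.py. Nothing listed in _import_structure is imported until the
# corresponding attribute is first accessed.
import sys

from transformers.utils import _LazyModule

_import_structure = {"configuration_clap": ["ClapConfig"]}
sys.modules[__name__] = _LazyModule(
    __name__, globals()["__file__"], _import_structure, module_spec=__spec__
)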
'''simple docstring'''
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def __lowerCamelCase ( SCREAMING_SNAKE_CASE_ : dict ) -> tuple:
"""simple docstring"""
return (data["data"], data["target"])
def __lowerCamelCase ( SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : np.ndarray ) -> np.ndarray:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = XGBRegressor(verbosity=0 , random_state=4_2 )
xgb.fit(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Predict target for test data
SCREAMING_SNAKE_CASE_ : Optional[Any] = xgb.predict(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ : List[str] = predictions.reshape(len(SCREAMING_SNAKE_CASE_ ) , 1 )
return predictions
def __lowerCamelCase ( ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = fetch_california_housing()
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : List[Any] = data_handling(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : Dict = train_test_split(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , test_size=0.25 , random_state=1 )
SCREAMING_SNAKE_CASE_ : int = xgboost(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Error printing
print(F"Mean Absolute Error : {mean_absolute_error(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )}" )
print(F"Mean Square Error : {mean_squared_error(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )}" )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 421 | 0 |
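# A quick hand-check of the two error metrics printed above.
import numpy as np
from sklearn.metrics import mean_absolute_error, mean_squared_error

y_true = np.array([3.0, -0.5, 2.0])
y_pred = np.array([2.5, 0.0, 2.0])
print(mean_absolute_error(y_true, y_pred))  # (0.5 + 0.5 + 0.0) / 3 = 0.333...
print(mean_squared_error(y_true, y_pred))   # (0.25 + 0.25 + 0.0) / 3 = 0.166...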
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def _lowerCamelCase ( A_ : Dict , A_ : Optional[Any] , A_ : Any , A_ : int ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ : int =multiprocessing.Manager()
UpperCamelCase__ : Optional[Any] =manager.list()
UpperCamelCase__ : List[str] =multiprocessing.Process(target=lowercase_ , args=(check_program, result, timeout) )
p.start()
p.join(timeout=timeout + 1 )
if p.is_alive():
p.kill()
if not result:
result.append("timed out" )
return {
"task_id": task_id,
"passed": result[0] == "passed",
"result": result[0],
"completion_id": completion_id,
}
def _lowerCamelCase ( A_ : Any , A_ : Dict , A_ : Dict ) -> Union[str, Any]:
'''simple docstring'''
with create_tempdir():
# These system calls are needed when cleaning up tempdir.
import os
import shutil
UpperCamelCase__ : Optional[int] =shutil.rmtree
UpperCamelCase__ : List[str] =os.rmdir
UpperCamelCase__ : List[Any] =os.chdir
# Disable functionalities that can make destructive changes to the test.
reliability_guard()
# Run program.
try:
UpperCamelCase__ : Optional[Any] ={}
with swallow_io():
with time_limit(lowercase_ ):
exec(lowercase_ , lowercase_ )
result.append("passed" )
except TimeoutException:
result.append("timed out" )
except BaseException as e:
result.append(f'''failed: {e}''' )
# Needed for cleaning up.
UpperCamelCase__ : Dict =rmtree
UpperCamelCase__ : Any =rmdir
UpperCamelCase__ : Tuple =chdir
@contextlib.contextmanager
def _lowerCamelCase ( A_ : str ) -> Optional[int]:
'''simple docstring'''
def signal_handler(A_ : Tuple , A_ : Union[str, Any] ):
raise TimeoutException("Timed out!" )
signal.setitimer(signal.ITIMER_REAL , lowercase_ )
signal.signal(signal.SIGALRM , lowercase_ )
try:
yield
finally:
signal.setitimer(signal.ITIMER_REAL , 0 )
@contextlib.contextmanager
def _lowerCamelCase ( ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ : int =WriteOnlyStringIO()
with contextlib.redirect_stdout(lowercase_ ):
with contextlib.redirect_stderr(lowercase_ ):
with redirect_stdin(lowercase_ ):
yield
@contextlib.contextmanager
def _lowerCamelCase ( ) -> List[Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as dirname:
with chdir(lowercase_ ):
yield dirname
class lowercase__( snake_case__ ):
'''simple docstring'''
pass
class lowercase__( io.StringIO ):
'''simple docstring'''
def UpperCAmelCase ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Optional[Any]:
"""simple docstring"""
raise OSError
def UpperCAmelCase ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Optional[int]:
"""simple docstring"""
raise OSError
def UpperCAmelCase ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Optional[int]:
"""simple docstring"""
raise OSError
def UpperCAmelCase ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) -> Any:
"""simple docstring"""
return False
class lowercase__( contextlib._RedirectStream ): # type: ignore
'''simple docstring'''
snake_case__ = '''stdin'''
@contextlib.contextmanager
def _lowerCamelCase ( A_ : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
if root == ".":
yield
return
UpperCamelCase__ : List[str] =os.getcwd()
os.chdir(lowercase_ )
try:
yield
except BaseException as exc:
raise exc
finally:
os.chdir(lowercase_ )
def _lowerCamelCase ( A_ : List[Any]=None ) -> Tuple:
'''simple docstring'''
if maximum_memory_bytes is not None:
import resource
resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) )
resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) )
if not platform.uname().system == "Darwin":
resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) )
faulthandler.disable()
import builtins
UpperCamelCase__ : Dict =None
UpperCamelCase__ : List[Any] =None
import os
UpperCamelCase__ : List[str] ="""1"""
UpperCamelCase__ : Optional[Any] =None
UpperCamelCase__ : List[Any] =None
UpperCamelCase__ : Tuple =None
UpperCamelCase__ : List[Any] =None
UpperCamelCase__ : int =None
UpperCamelCase__ : List[str] =None
UpperCamelCase__ : Union[str, Any] =None
UpperCamelCase__ : Any =None
UpperCamelCase__ : Any =None
UpperCamelCase__ : Optional[int] =None
UpperCamelCase__ : Union[str, Any] =None
UpperCamelCase__ : Tuple =None
UpperCamelCase__ : Tuple =None
UpperCamelCase__ : Any =None
UpperCamelCase__ : Dict =None
UpperCamelCase__ : Union[str, Any] =None
UpperCamelCase__ : Union[str, Any] =None
UpperCamelCase__ : str =None
UpperCamelCase__ : Any =None
UpperCamelCase__ : List[str] =None
UpperCamelCase__ : Tuple =None
UpperCamelCase__ : List[Any] =None
UpperCamelCase__ : Optional[Any] =None
UpperCamelCase__ : Union[str, Any] =None
UpperCamelCase__ : Union[str, Any] =None
UpperCamelCase__ : Any =None
UpperCamelCase__ : int =None
import shutil
UpperCamelCase__ : List[Any] =None
UpperCamelCase__ : List[str] =None
UpperCamelCase__ : Union[str, Any] =None
import subprocess
UpperCamelCase__ : Optional[Any] =None # type: ignore
UpperCamelCase__ : List[str] =None
import sys
UpperCamelCase__ : List[Any] =None
UpperCamelCase__ : Any =None
UpperCamelCase__ : int =None
UpperCamelCase__ : List[str] =None
UpperCamelCase__ : List[Any] =None
| 720 |
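# A self-contained sketch of the core pattern above: exec untrusted code in a
# separate process, collect the verdict through a Manager list, and kill the
# worker if it overruns its time budget. (The full sandbox above additionally
# disables os/shutil/subprocess and redirects I/O before exec'ing.)
import multiprocessing

def _run(program: str, result) -> None:
    try:
        exec(program, {})
        result.append("passed")
    except BaseException as e:  # mirror the broad catch used by the sandbox
        result.append(f"failed: {e}")

if __name__ == "__main__":
    manager = multiprocessing.Manager()
    result = manager.list()
    p = multiprocessing.Process(target=_run, args=("assert 1 + 1 == 2", result))
    p.start()
    p.join(timeout=3.0 + 1)
    if p.is_alive():
        p.kill()
    print(result[0] if result else "timed out")  # -> passed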
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase__( snake_case__ , unittest.TestCase ):
'''simple docstring'''
snake_case__ = LEDTokenizer
snake_case__ = LEDTokenizerFast
snake_case__ = True
def UpperCAmelCase ( self) -> List[Any]:
"""simple docstring"""
super().setUp()
UpperCamelCase__ : Any =[
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
UpperCamelCase__ : int =dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE))))
UpperCamelCase__ : Optional[int] =["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
UpperCamelCase__ : Optional[int] ={"unk_token": "<unk>"}
UpperCamelCase__ : Optional[int] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
UpperCamelCase__ : Optional[int] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"])
with open(self.vocab_file , "w" , encoding="utf-8") as fp:
fp.write(json.dumps(__SCREAMING_SNAKE_CASE) + "\n")
with open(self.merges_file , "w" , encoding="utf-8") as fp:
fp.write("\n".join(__SCREAMING_SNAKE_CASE))
def UpperCAmelCase ( self , **__SCREAMING_SNAKE_CASE) -> Optional[int]:
"""simple docstring"""
kwargs.update(self.special_tokens_map)
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE)
def UpperCAmelCase ( self , **__SCREAMING_SNAKE_CASE) -> Optional[Any]:
"""simple docstring"""
kwargs.update(self.special_tokens_map)
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE)
def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE) -> Any:
"""simple docstring"""
return "lower newer", "lower newer"
@cached_property
def UpperCAmelCase ( self) -> Optional[Any]:
"""simple docstring"""
return LEDTokenizer.from_pretrained("allenai/led-base-16384")
@cached_property
def UpperCAmelCase ( self) -> Optional[Any]:
"""simple docstring"""
return LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
@require_torch
def UpperCAmelCase ( self) -> Optional[int]:
"""simple docstring"""
UpperCamelCase__ : Optional[Any] =["A long paragraph for summarization.", "Another paragraph for summarization."]
UpperCamelCase__ : Optional[int] =[0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCamelCase__ : Any =tokenizer(__SCREAMING_SNAKE_CASE , max_length=len(__SCREAMING_SNAKE_CASE) , padding=__SCREAMING_SNAKE_CASE , return_tensors="pt")
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
self.assertEqual((2, 9) , batch.input_ids.shape)
self.assertEqual((2, 9) , batch.attention_mask.shape)
UpperCamelCase__ : List[Any] =batch.input_ids.tolist()[0]
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
@require_torch
def UpperCAmelCase ( self) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase__ : List[Any] =["A long paragraph for summarization.", "Another paragraph for summarization."]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCamelCase__ : int =tokenizer(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , return_tensors="pt")
self.assertIn("input_ids" , __SCREAMING_SNAKE_CASE)
self.assertIn("attention_mask" , __SCREAMING_SNAKE_CASE)
self.assertNotIn("labels" , __SCREAMING_SNAKE_CASE)
self.assertNotIn("decoder_attention_mask" , __SCREAMING_SNAKE_CASE)
@require_torch
def UpperCAmelCase ( self) -> List[Any]:
"""simple docstring"""
UpperCamelCase__ : Optional[Any] =[
"Summary of the text.",
"Another summary.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCamelCase__ : Tuple =tokenizer(text_target=__SCREAMING_SNAKE_CASE , max_length=32 , padding="max_length" , return_tensors="pt")
self.assertEqual(32 , targets["input_ids"].shape[1])
@require_torch
def UpperCAmelCase ( self) -> int:
"""simple docstring"""
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCamelCase__ : Optional[int] =tokenizer(
["I am a small frog" * 10_24, "I am a small frog"] , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , return_tensors="pt")
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
self.assertEqual(batch.input_ids.shape , (2, 51_22))
@require_torch
def UpperCAmelCase ( self) -> List[Any]:
"""simple docstring"""
UpperCamelCase__ : Union[str, Any] =["A long paragraph for summarization."]
UpperCamelCase__ : Any =[
"Summary of the text.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCamelCase__ : str =tokenizer(__SCREAMING_SNAKE_CASE , return_tensors="pt")
UpperCamelCase__ : str =tokenizer(text_target=__SCREAMING_SNAKE_CASE , return_tensors="pt")
UpperCamelCase__ : int =inputs["input_ids"]
UpperCamelCase__ : Tuple =targets["input_ids"]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())
@require_torch
def UpperCAmelCase ( self) -> List[str]:
"""simple docstring"""
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCamelCase__ : Any =["Summary of the text.", "Another summary."]
UpperCamelCase__ : List[str] =[[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
UpperCamelCase__ : str =tokenizer(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE)
UpperCamelCase__ : Optional[Any] =[[0] * len(__SCREAMING_SNAKE_CASE) for x in encoded_output["input_ids"]]
UpperCamelCase__ : Any =tokenizer.pad(__SCREAMING_SNAKE_CASE)
self.assertSequenceEqual(outputs["global_attention_mask"] , __SCREAMING_SNAKE_CASE)
def UpperCAmelCase ( self) -> Optional[int]:
"""simple docstring"""
pass
def UpperCAmelCase ( self) -> List[Any]:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})'''):
UpperCamelCase__ : Dict =self.rust_tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
UpperCamelCase__ : Dict =self.tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
UpperCamelCase__ : List[str] ="A, <mask> AllenNLP sentence."
UpperCamelCase__ : List[Any] =tokenizer_r.encode_plus(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE)
UpperCamelCase__ : Dict =tokenizer_p.encode_plus(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE)
self.assertEqual(sum(tokens_r["token_type_ids"]) , sum(tokens_p["token_type_ids"]))
self.assertEqual(
sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]) , sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]) , )
UpperCamelCase__ : List[str] =tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
UpperCamelCase__ : str =tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
self.assertSequenceEqual(tokens_p["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2])
self.assertSequenceEqual(tokens_r["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2])
self.assertSequenceEqual(
__SCREAMING_SNAKE_CASE , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"])
self.assertSequenceEqual(
__SCREAMING_SNAKE_CASE , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"])
| 582 | 0 |
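# A sketch of the global_attention_mask padding checked near the end of the
# test class above: attach one zero entry per token, then tokenizer.pad
# extends the mask (with -1, per the expected values in the test) to the
# batch's padded length.
from transformers import LEDTokenizerFast

tok = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
enc = tok(["Summary of the text.", "Another summary."])
enc["global_attention_mask"] = [[0] * len(ids) for ids in enc["input_ids"]]
padded = tok.pad(enc)
print(padded["global_attention_mask"])
# e.g. [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]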
"""simple docstring"""
from collections.abc import Sequence
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase = False ):
'''simple docstring'''
if not arr:
return 0
UpperCAmelCase__ : str = 0 if allow_empty_subarrays else float("""-inf""" )
UpperCAmelCase__ : List[Any] = 0.0
for num in arr:
UpperCAmelCase__ : Optional[Any] = max(0 if allow_empty_subarrays else num , curr_sum + num )
UpperCAmelCase__ : Dict = max(__UpperCamelCase , __UpperCamelCase )
return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
__UpperCAmelCase = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(F"{max_subarray_sum(nums) = }")
| 65 |
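# The same algorithm with conventional names restored: Kadane's O(n) scan for
# the maximum contiguous subarray sum.
def max_subarray_sum(arr, allow_empty_subarrays: bool = False) -> float:
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        # either extend the running subarray, or restart (at num, or empty)
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)
    return max_sum

assert max_subarray_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6
assert max_subarray_sum([-1, -2], allow_empty_subarrays=True) == 0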
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class _A ( _lowercase , unittest.TestCase ):
'''simple docstring'''
_snake_case : int = RoFormerTokenizer
_snake_case : Optional[Any] = RoFormerTokenizerFast
_snake_case : int = True
_snake_case : Tuple = True
def _snake_case ( self : Union[str, Any] ):
'''simple docstring'''
super().setUp()
def _snake_case ( self : Optional[int] , **lowerCamelCase : int ):
'''simple docstring'''
return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base" , **lowerCamelCase )
def _snake_case ( self : List[Any] , **lowerCamelCase : List[str] ):
'''simple docstring'''
return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base" , **lowerCamelCase )
def _snake_case ( self : Dict ):
'''simple docstring'''
__lowercase = "永和服装饰品有限公司,今天天气非常好"
__lowercase = "永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"
return input_text, output_text
def _snake_case ( self : int ):
'''simple docstring'''
__lowercase = self.get_tokenizer()
__lowercase , __lowercase = self.get_chinese_input_output_texts()
__lowercase = tokenizer.tokenize(lowerCamelCase )
self.assertListEqual(lowerCamelCase , output_text.split() )
__lowercase = tokens + [tokenizer.unk_token]
__lowercase = [22_943, 21_332, 34_431, 45_904, 117, 306, 1_231, 1_231, 2_653, 33_994, 1_266, 100]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase ) , lowerCamelCase )
def _snake_case ( self : str ):
'''simple docstring'''
__lowercase = self.get_rust_tokenizer()
__lowercase , __lowercase = self.get_chinese_input_output_texts()
__lowercase = tokenizer.tokenize(lowerCamelCase )
self.assertListEqual(lowerCamelCase , output_text.split() )
__lowercase = tokens + [tokenizer.unk_token]
__lowercase = [22_943, 21_332, 34_431, 45_904, 117, 306, 1_231, 1_231, 2_653, 33_994, 1_266, 100]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase ) , lowerCamelCase )
def _snake_case ( self : int ):
'''simple docstring'''
pass
def _snake_case ( self : Union[str, Any] ):
'''simple docstring'''
pass
def _snake_case ( self : str ):
'''simple docstring'''
pass
| 402 | 0 |
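# The pre-tokenization the tests above expect: rjieba segments in-vocab
# Chinese words and falls back to single characters otherwise (requires the
# rjieba package).
from transformers import RoFormerTokenizerFast

tok = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
print(tok.tokenize("永和服装饰品有限公司,今天天气非常好"))
# ['永和', '服装', '饰品', '有限公司', ',', '今', '天', '天', '气', '非常', '好']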
from __future__ import annotations
from statistics import mean
def __magic_name__ ( __a : list[int] , __a : list[int] , __a : int ):
'''simple docstring'''
UpperCamelCase__ = [0] * no_of_processes
UpperCamelCase__ = [0] * no_of_processes
# Initialize remaining_time to burst_time.
for i in range(__a ):
UpperCamelCase__ = burst_time[i]
UpperCamelCase__ = []
UpperCamelCase__ = 0
UpperCamelCase__ = 0
# While processes remain uncompleted:
# a process whose arrival time has passed and which still has remaining
# execution time is put into ready_process.
# The shortest process in ready_process, target_process, is executed.
while completed != no_of_processes:
UpperCamelCase__ = []
UpperCamelCase__ = -1
for i in range(__a ):
if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
ready_process.append(__a )
if len(__a ) > 0:
UpperCamelCase__ = ready_process[0]
for i in ready_process:
if remaining_time[i] < remaining_time[target_process]:
UpperCamelCase__ = i
total_time += burst_time[target_process]
completed += 1
UpperCamelCase__ = 0
UpperCamelCase__ = (
total_time - arrival_time[target_process] - burst_time[target_process]
)
else:
total_time += 1
return waiting_time
def __magic_name__ ( __a : list[int] , __a : int , __a : list[int] ):
'''simple docstring'''
UpperCamelCase__ = [0] * no_of_processes
for i in range(__a ):
UpperCamelCase__ = burst_time[i] + waiting_time[i]
return turn_around_time
if __name__ == "__main__":
print('''[TEST CASE 01]''')
lowerCamelCase_ = 4
lowerCamelCase_ = [2, 5, 3, 7]
lowerCamelCase_ = [0, 0, 0, 0]
lowerCamelCase_ = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
lowerCamelCase_ = calculate_turnaroundtime(
burst_time, no_of_processes, waiting_time
)
# Printing the Result
print('''PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time''')
for i, process_id in enumerate(list(range(1, 5))):
print(
f'{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t'
f'{waiting_time[i]}\t\t\t\t{turn_around_time[i]}'
)
print(f'\nAverage waiting time = {mean(waiting_time):.5f}')
print(f'Average turnaround time = {mean(turn_around_time):.5f}')
| 705 |
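# Hand-check of test case 01 above: with every arrival at t=0, this scheduler
# degenerates to shortest-job-first, so the run order is the bursts sorted
# ascending (2, 3, 5, 7) and the waiting times follow directly.
waiting = calculate_waitingtime([0, 0, 0, 0], [2, 5, 3, 7], 4)
assert waiting == [0, 5, 2, 10]
assert calculate_turnaroundtime([2, 5, 3, 7], 4, waiting) == [2, 10, 5, 17]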
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
lowerCamelCase_ = {
'''tiny.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt''',
'''tiny''': '''https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt''',
'''base.en''': '''https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt''',
'''base''': '''https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt''',
'''small.en''': '''https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt''',
'''small''': '''https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt''',
'''medium.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt''',
'''medium''': '''https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt''',
'''large''': '''https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt''',
'''large-v2''': '''https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt''',
}
def __magic_name__ ( __a : List[str] ):
'''simple docstring'''
UpperCamelCase__ = ["""layers""", """blocks"""]
for k in ignore_keys:
state_dict.pop(__a , __a )
lowerCamelCase_ = {
'''blocks''': '''layers''',
'''mlp.0''': '''fc1''',
'''mlp.2''': '''fc2''',
'''mlp_ln''': '''final_layer_norm''',
'''.attn.query''': '''.self_attn.q_proj''',
'''.attn.key''': '''.self_attn.k_proj''',
'''.attn.value''': '''.self_attn.v_proj''',
'''.attn_ln''': '''.self_attn_layer_norm''',
'''.attn.out''': '''.self_attn.out_proj''',
'''.cross_attn.query''': '''.encoder_attn.q_proj''',
'''.cross_attn.key''': '''.encoder_attn.k_proj''',
'''.cross_attn.value''': '''.encoder_attn.v_proj''',
'''.cross_attn_ln''': '''.encoder_attn_layer_norm''',
'''.cross_attn.out''': '''.encoder_attn.out_proj''',
'''decoder.ln.''': '''decoder.layer_norm.''',
'''encoder.ln.''': '''encoder.layer_norm.''',
'''token_embedding''': '''embed_tokens''',
'''encoder.positional_embedding''': '''encoder.embed_positions.weight''',
'''decoder.positional_embedding''': '''decoder.embed_positions.weight''',
'''ln_post''': '''layer_norm''',
}
def __magic_name__ ( __a : Dict ):
'''simple docstring'''
UpperCamelCase__ = list(s_dict.keys() )
for key in keys:
UpperCamelCase__ = key
for k, v in WHISPER_MAPPING.items():
if k in key:
UpperCamelCase__ = new_key.replace(__a , __a )
print(f"{key} -> {new_key}" )
UpperCamelCase__ = s_dict.pop(__a )
return s_dict
def __magic_name__ ( __a : Optional[Any] ):
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ = emb.weight.shape
UpperCamelCase__ = nn.Linear(__a , __a , bias=__a )
UpperCamelCase__ = emb.weight.data
return lin_layer
def __magic_name__ ( __a : str , __a : str ):
'''simple docstring'''
os.makedirs(__a , exist_ok=__a )
UpperCamelCase__ = os.path.basename(__a )
UpperCamelCase__ = url.split("""/""" )[-2]
UpperCamelCase__ = os.path.join(__a , __a )
if os.path.exists(__a ) and not os.path.isfile(__a ):
raise RuntimeError(f"{download_target} exists and is not a regular file" )
if os.path.isfile(__a ):
UpperCamelCase__ = open(__a , """rb""" ).read()
if hashlib.shaaaa(__a ).hexdigest() == expected_shaaaa:
return model_bytes
else:
warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file" )
with urllib.request.urlopen(__a ) as source, open(__a , """wb""" ) as output:
with tqdm(
total=int(source.info().get("""Content-Length""" ) ) , ncols=80 , unit="""iB""" , unit_scale=__a , unit_divisor=1_024 ) as loop:
while True:
UpperCamelCase__ = source.read(8_192 )
if not buffer:
break
output.write(__a )
loop.update(len(__a ) )
UpperCamelCase__ = open(__a , """rb""" ).read()
if hashlib.shaaaa(__a ).hexdigest() != expected_shaaaa:
raise RuntimeError(
"""Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model.""" )
return model_bytes
def __magic_name__ ( __a : Union[str, Any] , __a : Optional[int] ):
'''simple docstring'''
if ".pt" not in checkpoint_path:
UpperCamelCase__ = _download(_MODELS[checkpoint_path] )
else:
UpperCamelCase__ = torch.load(__a , map_location="""cpu""" )
UpperCamelCase__ = original_checkpoint["""dims"""]
UpperCamelCase__ = original_checkpoint["""model_state_dict"""]
UpperCamelCase__ = state_dict["""decoder.token_embedding.weight"""]
remove_ignore_keys_(__a )
rename_keys(__a )
UpperCamelCase__ = True
UpperCamelCase__ = state_dict["""decoder.layers.0.fc1.weight"""].shape[0]
UpperCamelCase__ = WhisperConfig(
vocab_size=dimensions["""n_vocab"""] , encoder_ffn_dim=__a , decoder_ffn_dim=__a , num_mel_bins=dimensions["""n_mels"""] , d_model=dimensions["""n_audio_state"""] , max_target_positions=dimensions["""n_text_ctx"""] , encoder_layers=dimensions["""n_audio_layer"""] , encoder_attention_heads=dimensions["""n_audio_head"""] , decoder_layers=dimensions["""n_text_layer"""] , decoder_attention_heads=dimensions["""n_text_head"""] , max_source_positions=dimensions["""n_audio_ctx"""] , )
UpperCamelCase__ = WhisperForConditionalGeneration(__a )
UpperCamelCase__ , UpperCamelCase__ = model.model.load_state_dict(__a , strict=__a )
if len(__a ) > 0 and not set(__a ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
"""Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"""
f" but all the following weights are missing {missing}" )
if tie_embeds:
UpperCamelCase__ = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
UpperCamelCase__ = proj_out_weights
model.save_pretrained(__a )
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to the downloaded checkpoints''')
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
lowerCamelCase_ = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 86 | 0 |
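# A minimal sketch of the rename pass in the conversion script above: each
# substring rule in the mapping is applied to every state-dict key (the three
# rules here are copied from the full WHISPER_MAPPING).
WHISPER_MAPPING = {".attn.query": ".self_attn.q_proj", "mlp.0": "fc1", "blocks": "layers"}

def rename_keys(s_dict: dict) -> dict:
    for key in list(s_dict.keys()):
        new_key = key
        for old, new in WHISPER_MAPPING.items():
            if old in key:
                new_key = new_key.replace(old, new)
        s_dict[new_key] = s_dict.pop(key)
    return s_dict

print(rename_keys({"encoder.blocks.0.mlp.0.weight": 0}))
# {'encoder.layers.0.fc1.weight': 0}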