repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
lama-cleaner | lama-cleaner-main/lama_cleaner/plugins/segment_anything/modeling/transformer.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
from torch import Tensor, nn
import math
from typing import Tuple, Type
from .common import MLPBlock
class TwoWayTransformer(nn.Module):
    def __init__(
        self,
        depth: int,
        embedding_dim: int,
        num_heads: int,
        mlp_dim: int,
        activation: Type[nn.Module] = nn.ReLU,
        attention_downsample_rate: int = 2,
    ) -> None:
        """
        A transformer decoder that attends to an input image using
        queries whose positional embedding is supplied.
        Args:
          depth (int): number of layers in the transformer
          embedding_dim (int): the channel dimension for the input embeddings
          num_heads (int): the number of heads for multihead attention. Must
            divide embedding_dim
          mlp_dim (int): the channel dimension internal to the MLP block
          activation (nn.Module): the activation to use in the MLP block
          attention_downsample_rate (int): factor by which the internal
            attention dimension is reduced (passed through to Attention)
        """
        super().__init__()
        self.depth = depth
        self.embedding_dim = embedding_dim
        self.num_heads = num_heads
        self.mlp_dim = mlp_dim
        self.layers = nn.ModuleList()
        for i in range(depth):
            self.layers.append(
                TwoWayAttentionBlock(
                    embedding_dim=embedding_dim,
                    num_heads=num_heads,
                    mlp_dim=mlp_dim,
                    activation=activation,
                    attention_downsample_rate=attention_downsample_rate,
                    skip_first_layer_pe=(i == 0),  # only the first layer skips the PE
                )
            )
        self.final_attn_token_to_image = Attention(
            embedding_dim, num_heads, downsample_rate=attention_downsample_rate
        )
        self.norm_final_attn = nn.LayerNorm(embedding_dim)
    def forward(
        self,
        image_embedding: Tensor,
        image_pe: Tensor,
        point_embedding: Tensor,
    ) -> Tuple[Tensor, Tensor]:
        """
        Args:
          image_embedding (torch.Tensor): image to attend to. Should be shape
            B x embedding_dim x h x w for any h and w.
          image_pe (torch.Tensor): the positional encoding to add to the image. Must
            have the same shape as image_embedding.
          point_embedding (torch.Tensor): the embedding to add to the query points.
            Must have shape B x N_points x embedding_dim for any N_points.
        Returns:
          torch.Tensor: the processed point_embedding
          torch.Tensor: the processed image_embedding
        """
        # BxCxHxW -> BxHWxC == B x N_image_tokens x C
        bs, c, h, w = image_embedding.shape
        image_embedding = image_embedding.flatten(2).permute(0, 2, 1)
        image_pe = image_pe.flatten(2).permute(0, 2, 1)
        # Prepare queries: sparse prompt tokens query the dense image tokens.
        queries = point_embedding
        keys = image_embedding
        # Apply transformer blocks and final layernorm
        for layer in self.layers:
            queries, keys = layer(
                queries=queries,
                keys=keys,
                query_pe=point_embedding,
                key_pe=image_pe,
            )
        # Apply the final attention layer from the points to the image
        q = queries + point_embedding
        k = keys + image_pe
        attn_out = self.final_attn_token_to_image(q=q, k=k, v=keys)
        queries = queries + attn_out
        queries = self.norm_final_attn(queries)
        return queries, keys
class TwoWayAttentionBlock(nn.Module):
    def __init__(
        self,
        embedding_dim: int,
        num_heads: int,
        mlp_dim: int = 2048,
        activation: Type[nn.Module] = nn.ReLU,
        attention_downsample_rate: int = 2,
        skip_first_layer_pe: bool = False,
    ) -> None:
        """
        A transformer block with four layers: (1) self-attention of sparse
        inputs, (2) cross attention of sparse inputs to dense inputs, (3) mlp
        block on sparse inputs, and (4) cross attention of dense inputs to sparse
        inputs.
        Arguments:
          embedding_dim (int): the channel dimension of the embeddings
          num_heads (int): the number of heads in the attention layers
          mlp_dim (int): the hidden dimension of the mlp block
          activation (nn.Module): the activation of the mlp block
          skip_first_layer_pe (bool): skip the PE on the first layer
        """
        super().__init__()
        self.self_attn = Attention(embedding_dim, num_heads)
        self.norm1 = nn.LayerNorm(embedding_dim)
        self.cross_attn_token_to_image = Attention(
            embedding_dim, num_heads, downsample_rate=attention_downsample_rate
        )
        self.norm2 = nn.LayerNorm(embedding_dim)
        self.mlp = MLPBlock(embedding_dim, mlp_dim, activation)
        self.norm3 = nn.LayerNorm(embedding_dim)
        self.norm4 = nn.LayerNorm(embedding_dim)
        self.cross_attn_image_to_token = Attention(
            embedding_dim, num_heads, downsample_rate=attention_downsample_rate
        )
        self.skip_first_layer_pe = skip_first_layer_pe
    def forward(
        self, queries: Tensor, keys: Tensor, query_pe: Tensor, key_pe: Tensor
    ) -> Tuple[Tensor, Tensor]:
        # (1) Self-attention over the sparse (prompt) tokens.
        if self.skip_first_layer_pe:
            queries = self.self_attn(q=queries, k=queries, v=queries)
        else:
            pe_queries = queries + query_pe
            queries = queries + self.self_attn(q=pe_queries, k=pe_queries, v=queries)
        queries = self.norm1(queries)
        # (2) Cross-attention: tokens attend to the image embedding.
        queries = queries + self.cross_attn_token_to_image(
            q=queries + query_pe, k=keys + key_pe, v=keys
        )
        queries = self.norm2(queries)
        # (3) MLP applied to the token stream only.
        queries = self.norm3(queries + self.mlp(queries))
        # (4) Cross-attention the other way: image embedding attends to tokens.
        keys = keys + self.cross_attn_image_to_token(
            q=keys + key_pe, k=queries + query_pe, v=queries
        )
        keys = self.norm4(keys)
        return queries, keys
class Attention(nn.Module):
    """
    An attention layer that allows for downscaling the size of the embedding
    after projection to queries, keys, and values.
    """
    def __init__(
        self,
        embedding_dim: int,
        num_heads: int,
        downsample_rate: int = 1,
    ) -> None:
        """
        Args:
          embedding_dim (int): input/output channel dimension.
          num_heads (int): number of attention heads; must divide
            embedding_dim // downsample_rate.
          downsample_rate (int): factor by which the internal projected
            dimension is reduced relative to embedding_dim.
        """
        super().__init__()
        self.embedding_dim = embedding_dim
        self.internal_dim = embedding_dim // downsample_rate
        self.num_heads = num_heads
        # The per-head width is internal_dim / num_heads, so num_heads must
        # divide the downsampled dimension (the old message wrongly referred
        # to embedding_dim).
        assert (
            self.internal_dim % num_heads == 0
        ), "num_heads must divide embedding_dim // downsample_rate."
        self.q_proj = nn.Linear(embedding_dim, self.internal_dim)
        self.k_proj = nn.Linear(embedding_dim, self.internal_dim)
        self.v_proj = nn.Linear(embedding_dim, self.internal_dim)
        self.out_proj = nn.Linear(self.internal_dim, embedding_dim)
    def _separate_heads(self, x: Tensor, num_heads: int) -> Tensor:
        """Split the channel dim into heads: B x N x C -> B x heads x N x C/heads."""
        b, n, c = x.shape
        x = x.reshape(b, n, num_heads, c // num_heads)
        return x.transpose(1, 2)  # B x N_heads x N_tokens x C_per_head
    def _recombine_heads(self, x: Tensor) -> Tensor:
        """Inverse of _separate_heads: merge heads back into the channel dim."""
        b, n_heads, n_tokens, c_per_head = x.shape
        x = x.transpose(1, 2)
        return x.reshape(b, n_tokens, n_heads * c_per_head)  # B x N_tokens x C
    def forward(self, q: Tensor, k: Tensor, v: Tensor) -> Tensor:
        """Standard scaled dot-product attention over q, k, v (B x N x C each)."""
        # Input projections
        q = self.q_proj(q)
        k = self.k_proj(k)
        v = self.v_proj(v)
        # Separate into heads
        q = self._separate_heads(q, self.num_heads)
        k = self._separate_heads(k, self.num_heads)
        v = self._separate_heads(v, self.num_heads)
        # Attention: scale by sqrt of the per-head dimension.
        _, _, _, c_per_head = q.shape
        attn = q @ k.permute(0, 1, 3, 2)  # B x N_heads x N_tokens x N_tokens
        attn = attn / math.sqrt(c_per_head)
        attn = torch.softmax(attn, dim=-1)
        # Get output
        out = attn @ v
        out = self._recombine_heads(out)
        out = self.out_proj(out)
        return out
| 8,396 | 33.842324 | 89 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/plugins/segment_anything/modeling/common.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
from typing import Type
class MLPBlock(nn.Module):
    """Two-layer feed-forward block: Linear -> activation -> Linear."""
    def __init__(
        self,
        embedding_dim: int,
        mlp_dim: int,
        act: Type[nn.Module] = nn.GELU,
    ) -> None:
        super().__init__()
        # Expand to mlp_dim, then project back to embedding_dim.
        self.lin1 = nn.Linear(embedding_dim, mlp_dim)
        self.lin2 = nn.Linear(mlp_dim, embedding_dim)
        self.act = act()
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        hidden = self.act(self.lin1(x))
        return self.lin2(hidden)
# From https://github.com/facebookresearch/detectron2/blob/main/detectron2/layers/batch_norm.py # noqa
# Itself from https://github.com/facebookresearch/ConvNeXt/blob/d1fa8f6fef0a165b27399986cc2bdacc92777e40/models/convnext.py#L119 # noqa
class LayerNorm2d(nn.Module):
    """LayerNorm over the channel dimension of a B x C x H x W tensor."""
    def __init__(self, num_channels: int, eps: float = 1e-6) -> None:
        super().__init__()
        self.weight = nn.Parameter(torch.ones(num_channels))
        self.bias = nn.Parameter(torch.zeros(num_channels))
        self.eps = eps
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Normalize each spatial position across channels (dim 1).
        mean = x.mean(1, keepdim=True)
        var = (x - mean).pow(2).mean(1, keepdim=True)
        normed = (x - mean) / torch.sqrt(var + self.eps)
        # Broadcast the per-channel affine parameters over H and W.
        return self.weight[:, None, None] * normed + self.bias[:, None, None]
| 1,479 | 32.636364 | 136 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/plugins/segment_anything/modeling/sam.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
from torch import nn
from torch.nn import functional as F
from typing import Any, Dict, List, Tuple
from .image_encoder import ImageEncoderViT
from .mask_decoder import MaskDecoder
from .prompt_encoder import PromptEncoder
class Sam(nn.Module):
    # Threshold applied to mask logits when binarizing predictions.
    mask_threshold: float = 0.0
    # Expected channel order of input images.
    image_format: str = "RGB"
    def __init__(
        self,
        image_encoder: ImageEncoderViT,
        prompt_encoder: PromptEncoder,
        mask_decoder: MaskDecoder,
        pixel_mean: List[float] = [123.675, 116.28, 103.53],
        pixel_std: List[float] = [58.395, 57.12, 57.375],
    ) -> None:
        """
        SAM predicts object masks from an image and input prompts.
        Arguments:
          image_encoder (ImageEncoderViT): The backbone used to encode the
            image into image embeddings that allow for efficient mask prediction.
          prompt_encoder (PromptEncoder): Encodes various types of input prompts.
          mask_decoder (MaskDecoder): Predicts masks from the image embeddings
            and encoded prompts.
          pixel_mean (list(float)): Mean values for normalizing pixels in the input image.
          pixel_std (list(float)): Std values for normalizing pixels in the input image.
        """
        super().__init__()
        self.image_encoder = image_encoder
        self.prompt_encoder = prompt_encoder
        self.mask_decoder = mask_decoder
        # persistent=False (third positional arg): normalization constants
        # are not saved in the state_dict.
        self.register_buffer("pixel_mean", torch.Tensor(pixel_mean).view(-1, 1, 1), False)
        self.register_buffer("pixel_std", torch.Tensor(pixel_std).view(-1, 1, 1), False)
    @property
    def device(self) -> Any:
        # The buffers live on the module's device, so use one as a proxy.
        return self.pixel_mean.device
    @torch.no_grad()
    def forward(
        self,
        batched_input: List[Dict[str, Any]],
        multimask_output: bool,
    ) -> List[Dict[str, torch.Tensor]]:
        """
        Predicts masks end-to-end from provided images and prompts.
        If prompts are not known in advance, using SamPredictor is
        recommended over calling the model directly.
        Arguments:
          batched_input (list(dict)): A list over input images, each a
            dictionary with the following keys. A prompt key can be
            excluded if it is not present.
              'image': The image as a torch tensor in 3xHxW format,
                already transformed for input to the model.
              'original_size': (tuple(int, int)) The original size of
                the image before transformation, as (H, W).
              'point_coords': (torch.Tensor) Batched point prompts for
                this image, with shape BxNx2. Already transformed to the
                input frame of the model.
              'point_labels': (torch.Tensor) Batched labels for point prompts,
                with shape BxN.
              'boxes': (torch.Tensor) Batched box inputs, with shape Bx4.
                Already transformed to the input frame of the model.
              'mask_inputs': (torch.Tensor) Batched mask inputs to the model,
                in the form Bx1xHxW.
          multimask_output (bool): Whether the model should predict multiple
            disambiguating masks, or return a single mask.
        Returns:
          (list(dict)): A list over input images, where each element is
            a dictionary with the following keys.
              'masks': (torch.Tensor) Batched binary mask predictions,
                with shape BxCxHxW, where B is the number of input prompts,
                C is determined by multimask_output, and (H, W) is the
                original size of the image.
              'iou_predictions': (torch.Tensor) The model's predictions
                of mask quality, in shape BxC.
              'low_res_logits': (torch.Tensor) Low resolution logits with
                shape BxCxHxW, where H=W=256. Can be passed as mask input
                to subsequent iterations of prediction.
        """
        input_images = torch.stack([self.preprocess(x["image"]) for x in batched_input], dim=0)
        image_embeddings = self.image_encoder(input_images)
        outputs = []
        # Decode each image's prompts against its own embedding.
        for image_record, curr_embedding in zip(batched_input, image_embeddings):
            if "point_coords" in image_record:
                points = (image_record["point_coords"], image_record["point_labels"])
            else:
                points = None
            sparse_embeddings, dense_embeddings = self.prompt_encoder(
                points=points,
                boxes=image_record.get("boxes", None),
                masks=image_record.get("mask_inputs", None),
            )
            low_res_masks, iou_predictions = self.mask_decoder(
                image_embeddings=curr_embedding.unsqueeze(0),
                image_pe=self.prompt_encoder.get_dense_pe(),
                sparse_prompt_embeddings=sparse_embeddings,
                dense_prompt_embeddings=dense_embeddings,
                multimask_output=multimask_output,
            )
            masks = self.postprocess_masks(
                low_res_masks,
                input_size=image_record["image"].shape[-2:],
                original_size=image_record["original_size"],
            )
            # Binarize the logits with the class-level threshold.
            masks = masks > self.mask_threshold
            outputs.append(
                {
                    "masks": masks,
                    "iou_predictions": iou_predictions,
                    "low_res_logits": low_res_masks,
                }
            )
        return outputs
    def postprocess_masks(
        self,
        masks: torch.Tensor,
        input_size: Tuple[int, ...],
        original_size: Tuple[int, ...],
    ) -> torch.Tensor:
        """
        Remove padding and upscale masks to the original image size.
        Arguments:
          masks (torch.Tensor): Batched masks from the mask_decoder,
            in BxCxHxW format.
          input_size (tuple(int, int)): The size of the image input to the
            model, in (H, W) format. Used to remove padding.
          original_size (tuple(int, int)): The original size of the image
            before resizing for input to the model, in (H, W) format.
        Returns:
          (torch.Tensor): Batched masks in BxCxHxW format, where (H, W)
            is given by original_size.
        """
        # Upscale to the padded model input size, crop away the padding,
        # then resize to the original image size.
        masks = F.interpolate(
            masks,
            (self.image_encoder.img_size, self.image_encoder.img_size),
            mode="bilinear",
            align_corners=False,
        )
        masks = masks[..., : input_size[0], : input_size[1]]
        masks = F.interpolate(masks, original_size, mode="bilinear", align_corners=False)
        return masks
    def preprocess(self, x: torch.Tensor) -> torch.Tensor:
        """Normalize pixel values and pad to a square input."""
        # Normalize colors
        x = (x - self.pixel_mean) / self.pixel_std
        # Pad on the bottom/right so the image fills the encoder's square input.
        h, w = x.shape[-2:]
        padh = self.image_encoder.img_size - h
        padw = self.image_encoder.img_size - w
        x = F.pad(x, (0, padw, 0, padh))
        return x
| 7,225 | 40.291429 | 95 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/plugins/segment_anything/modeling/__init__.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from .sam import Sam
from .image_encoder import ImageEncoderViT
from .mask_decoder import MaskDecoder
from .prompt_encoder import PromptEncoder
from .transformer import TwoWayTransformer
| 385 | 31.166667 | 61 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/tests/test_paint_by_example.py | from pathlib import Path
import cv2
import pytest
import torch
from PIL import Image
from lama_cleaner.model_manager import ModelManager
from lama_cleaner.schema import HDStrategy
from lama_cleaner.tests.test_model import get_config, get_data
current_dir = Path(__file__).parent.absolute().resolve()
save_dir = current_dir / 'result'
save_dir.mkdir(exist_ok=True, parents=True)
device = 'cuda' if torch.cuda.is_available() else 'cpu'
device = torch.device(device)
def assert_equal(
    model, config, gt_name,
    fx: float = 1, fy: float = 1,
    img_p=current_dir / "overture-creations-5sI6fQgYIuo.png",
    mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png",
    example_p=current_dir / "bunny.jpeg",
):
    """Run ``model`` on the image/mask pair with ``example_p`` as the
    paint-by-example reference, and save the result under ``gt_name``.

    fx/fy rescale both the input pair and the example image.
    """
    img, mask = get_data(fx=fx, fy=fy, img_p=img_p, mask_p=mask_p)
    example_image = cv2.imread(str(example_p))
    # cv2.imread with default flags returns a 3-channel BGR image, so the
    # correct conversion is COLOR_BGR2RGB; COLOR_BGRA2RGB expects a
    # 4-channel input and raises cv2.error on this JPEG.
    example_image = cv2.cvtColor(example_image, cv2.COLOR_BGR2RGB)
    example_image = cv2.resize(
        example_image, None, fx=fx, fy=fy, interpolation=cv2.INTER_AREA
    )
    print(f"Input image shape: {img.shape}, example_image: {example_image.shape}")
    config.paint_by_example_example_image = Image.fromarray(example_image)
    res = model(img, mask, config)
    cv2.imwrite(str(save_dir / gt_name), res)
@pytest.mark.parametrize("strategy", [HDStrategy.ORIGINAL])
def test_paint_by_example(strategy):
    # Basic paint-by-example run with the NSFW checker disabled, on a
    # non-uniformly rescaled input.
    pbe_model = ModelManager(name="paint_by_example", device=device, disable_nsfw=True)
    config = get_config(strategy, paint_by_example_steps=30)
    assert_equal(
        pbe_model,
        config,
        f"paint_by_example_{strategy.capitalize()}.png",
        img_p=current_dir / "overture-creations-5sI6fQgYIuo.png",
        mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png",
        fx=1.3,
        fy=0.9,
    )
@pytest.mark.parametrize("strategy", [HDStrategy.ORIGINAL])
def test_paint_by_example_disable_nsfw(strategy):
    # Same scenario as the basic test but with the NSFW checker active.
    pbe_model = ModelManager(name="paint_by_example", device=device, disable_nsfw=False)
    config = get_config(strategy, paint_by_example_steps=30)
    assert_equal(
        pbe_model,
        config,
        f"paint_by_example_{strategy.capitalize()}_disable_nsfw.png",
        img_p=current_dir / "overture-creations-5sI6fQgYIuo.png",
        mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png",
    )
@pytest.mark.parametrize("strategy", [HDStrategy.ORIGINAL])
def test_paint_by_example_sd_scale(strategy):
    # Exercise the sd_scale knob (< 1 shrinks the diffusion working size).
    pbe_model = ModelManager(name="paint_by_example", device=device, disable_nsfw=True)
    config = get_config(strategy, paint_by_example_steps=30, sd_scale=0.85)
    assert_equal(
        pbe_model,
        config,
        f"paint_by_example_{strategy.capitalize()}_sdscale.png",
        img_p=current_dir / "overture-creations-5sI6fQgYIuo.png",
        mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png",
        fx=1.3,
        fy=0.9,
    )
@pytest.mark.parametrize("strategy", [HDStrategy.ORIGINAL])
def test_paint_by_example_cpu_offload(strategy):
    # cpu_offload moves idle submodules off the accelerator between steps.
    pbe_model = ModelManager(
        name="paint_by_example", device=device, cpu_offload=True, disable_nsfw=False
    )
    config = get_config(strategy, paint_by_example_steps=30, sd_scale=0.85)
    assert_equal(
        pbe_model,
        config,
        f"paint_by_example_{strategy.capitalize()}_cpu_offload.png",
        img_p=current_dir / "overture-creations-5sI6fQgYIuo.png",
        mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png",
    )
@pytest.mark.parametrize("strategy", [HDStrategy.ORIGINAL])
def test_paint_by_example_cpu_offload_cpu_device(strategy):
    # cpu_offload combined with a CPU device; one step keeps the run fast.
    pbe_model = ModelManager(
        name="paint_by_example",
        device=torch.device('cpu'),
        cpu_offload=True,
        disable_nsfw=True,
    )
    config = get_config(strategy, paint_by_example_steps=1, sd_scale=0.85)
    assert_equal(
        pbe_model,
        config,
        f"paint_by_example_{strategy.capitalize()}_cpu_offload_cpu_device.png",
        img_p=current_dir / "overture-creations-5sI6fQgYIuo.png",
        mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png",
        fx=1.3,
        fy=0.9,
    )
| 3,985 | 36.252336 | 114 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/tests/test_load_img.py | from pathlib import Path
from lama_cleaner.helper import load_img
current_dir = Path(__file__).parent.absolute().resolve()
png_img_p = current_dir / "image.png"
jpg_img_p = current_dir / "bunny.jpeg"
def test_load_png_image():
    # The PNG fixture is 256x256 and carries an alpha channel.
    with open(png_img_p, "rb") as fp:
        np_img, alpha_channel = load_img(fp.read())
    assert np_img.shape == (256, 256, 3)
    assert alpha_channel.shape == (256, 256)
def test_load_jpg_image():
    # JPEG has no alpha channel, so load_img returns None for it.
    with open(jpg_img_p, "rb") as fp:
        np_img, alpha_channel = load_img(fp.read())
    assert np_img.shape == (394, 448, 3)
    assert alpha_channel is None
| 596 | 26.136364 | 56 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/tests/test_instruct_pix2pix.py | from pathlib import Path
import pytest
import torch
from lama_cleaner.model_manager import ModelManager
from lama_cleaner.tests.test_model import get_config, assert_equal
from lama_cleaner.schema import HDStrategy
current_dir = Path(__file__).parent.absolute().resolve()
save_dir = current_dir / 'result'
save_dir.mkdir(exist_ok=True, parents=True)
device = 'cuda' if torch.cuda.is_available() else 'cpu'
@pytest.mark.parametrize("disable_nsfw", [True, False])
@pytest.mark.parametrize("cpu_offload", [False, True])
def test_instruct_pix2pix(disable_nsfw, cpu_offload):
    # Full step count only on GPU; one step keeps CPU CI runs fast.
    steps = 50 if device == 'cuda' else 1
    p2p_model = ModelManager(
        name="instruct_pix2pix",
        device=torch.device(device),
        hf_access_token="",
        sd_run_local=False,
        disable_nsfw=disable_nsfw,
        sd_cpu_textencoder=False,
        cpu_offload=cpu_offload,
    )
    config = get_config(
        strategy=HDStrategy.ORIGINAL,
        prompt='What if it were snowing?',
        p2p_steps=steps,
        sd_scale=1.1,
    )
    suffix = f"device_{device}_disnsfw_{disable_nsfw}_cpu_offload_{cpu_offload}"
    assert_equal(
        p2p_model,
        config,
        f"instruct_pix2pix_{suffix}.png",
        img_p=current_dir / "overture-creations-5sI6fQgYIuo.png",
        mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png",
        fx=1.3,
    )
@pytest.mark.parametrize("disable_nsfw", [False])
@pytest.mark.parametrize("cpu_offload", [False])
def test_instruct_pix2pix_snow(disable_nsfw, cpu_offload):
    """Single-scenario instruct-pix2pix run on the unscaled input image."""
    sd_steps = 50 if device == 'cuda' else 1
    model = ModelManager(name="instruct_pix2pix",
                         device=torch.device(device),
                         hf_access_token="",
                         sd_run_local=False,
                         disable_nsfw=disable_nsfw,
                         sd_cpu_textencoder=False,
                         cpu_offload=cpu_offload)
    cfg = get_config(strategy=HDStrategy.ORIGINAL, prompt='What if it were snowing?', p2p_steps=sd_steps)
    # Plain literal; the original used an f-string with no placeholders.
    name = "snow"
    assert_equal(
        model,
        cfg,
        f"instruct_pix2pix_{name}.png",
        img_p=current_dir / "overture-creations-5sI6fQgYIuo.png",
        mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png",
    )
| 2,322 | 35.873016 | 119 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/tests/test_controlnet.py | import os
from lama_cleaner.const import SD_CONTROLNET_CHOICES
os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
from pathlib import Path
import pytest
import torch
from lama_cleaner.model_manager import ModelManager
from lama_cleaner.schema import HDStrategy, SDSampler
from lama_cleaner.tests.test_model import get_config, assert_equal
current_dir = Path(__file__).parent.absolute().resolve()
save_dir = current_dir / "result"
save_dir.mkdir(exist_ok=True, parents=True)
device = "cuda" if torch.cuda.is_available() else "cpu"
device = torch.device(device)
@pytest.mark.parametrize("sd_device", ["cuda", "mps"])
@pytest.mark.parametrize("strategy", [HDStrategy.ORIGINAL])
@pytest.mark.parametrize("sampler", [SDSampler.uni_pc])
@pytest.mark.parametrize("cpu_textencoder", [True])
@pytest.mark.parametrize("disable_nsfw", [True])
@pytest.mark.parametrize("sd_controlnet_method", SD_CONTROLNET_CHOICES)
def test_runway_sd_1_5(
    sd_device, strategy, sampler, cpu_textencoder, disable_nsfw, sd_controlnet_method
):
    """Run SD1.5 inpainting with each supported ControlNet method."""
    if sd_device == "cuda" and not torch.cuda.is_available():
        return
    # Fix: this guard previously tested the module-level ``device`` (which is
    # only ever cuda/cpu), so "mps" parametrizations were never skipped on
    # machines without MPS support.
    if sd_device == "mps" and not torch.backends.mps.is_available():
        return
    sd_steps = 1 if sd_device == "cpu" else 30
    model = ModelManager(
        name="sd1.5",
        sd_controlnet=True,
        device=torch.device(sd_device),
        hf_access_token="",
        sd_run_local=False,
        disable_nsfw=disable_nsfw,
        sd_cpu_textencoder=cpu_textencoder,
        sd_controlnet_method=sd_controlnet_method,
    )
    # Per-method conditioning strength used by the reference outputs.
    controlnet_conditioning_scale = {
        "control_v11p_sd15_canny": 0.4,
        "control_v11p_sd15_openpose": 0.4,
        "control_v11p_sd15_inpaint": 1.0,
        "control_v11f1p_sd15_depth": 1.0,
    }[sd_controlnet_method]
    cfg = get_config(
        strategy,
        prompt="a fox sitting on a bench",
        sd_steps=sd_steps,
        controlnet_conditioning_scale=controlnet_conditioning_scale,
        controlnet_method=sd_controlnet_method,
    )
    cfg.sd_sampler = sampler
    name = f"device_{sd_device}_{sampler}_cpu_textencoder_disable_nsfw"
    assert_equal(
        model,
        cfg,
        f"sd_controlnet_{sd_controlnet_method}_{name}.png",
        img_p=current_dir / "overture-creations-5sI6fQgYIuo.png",
        mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png",
        fx=1.2,
        fy=1.2,
    )
@pytest.mark.parametrize("sd_device", ["cuda", "mps"])
@pytest.mark.parametrize("sampler", [SDSampler.uni_pc])
def test_local_file_path(sd_device, sampler):
    """ControlNet canny with a local checkpoint file instead of the hub."""
    if sd_device == "cuda" and not torch.cuda.is_available():
        return
    # Fix: was ``device == "mps"`` — the module-level torch.device is never
    # "mps", so unavailable-MPS runs were not skipped.
    if sd_device == "mps" and not torch.backends.mps.is_available():
        return
    sd_steps = 1 if sd_device == "cpu" else 30
    model = ModelManager(
        name="sd1.5",
        sd_controlnet=True,
        device=torch.device(sd_device),
        hf_access_token="",
        sd_run_local=False,
        disable_nsfw=True,
        sd_cpu_textencoder=False,
        cpu_offload=True,
        sd_local_model_path="/Users/cwq/data/models/sd-v1-5-inpainting.ckpt",
        sd_controlnet_method="control_v11p_sd15_canny",
    )
    cfg = get_config(
        HDStrategy.ORIGINAL,
        prompt="a fox sitting on a bench",
        sd_steps=sd_steps,
        controlnet_method="control_v11p_sd15_canny",
    )
    cfg.sd_sampler = sampler
    name = f"device_{sd_device}_{sampler}"
    assert_equal(
        model,
        cfg,
        f"sd_controlnet_canny_local_model_{name}.png",
        img_p=current_dir / "overture-creations-5sI6fQgYIuo.png",
        mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png",
    )
@pytest.mark.parametrize("sd_device", ["cuda", "mps"])
@pytest.mark.parametrize("sampler", [SDSampler.uni_pc])
def test_local_file_path_controlnet_native_inpainting(sd_device, sampler):
    """ControlNet-native inpainting (non-inpaint base model, strength 1.0)."""
    if sd_device == "cuda" and not torch.cuda.is_available():
        return
    # Fix: was ``device == "mps"`` — the module-level torch.device is never
    # "mps", so unavailable-MPS runs were not skipped.
    if sd_device == "mps" and not torch.backends.mps.is_available():
        return
    sd_steps = 1 if sd_device == "cpu" else 30
    model = ModelManager(
        name="sd1.5",
        sd_controlnet=True,
        device=torch.device(sd_device),
        hf_access_token="",
        sd_run_local=False,
        disable_nsfw=True,
        sd_cpu_textencoder=False,
        cpu_offload=True,
        sd_local_model_path="/Users/cwq/data/models/v1-5-pruned-emaonly.safetensors",
        sd_controlnet_method="control_v11p_sd15_inpaint",
    )
    cfg = get_config(
        HDStrategy.ORIGINAL,
        prompt="a fox sitting on a bench",
        sd_steps=sd_steps,
        controlnet_conditioning_scale=1.0,
        sd_strength=1.0,
        controlnet_method="control_v11p_sd15_inpaint",
    )
    cfg.sd_sampler = sampler
    name = f"device_{sd_device}_{sampler}"
    assert_equal(
        model,
        cfg,
        f"sd_controlnet_local_native_{name}.png",
        img_p=current_dir / "overture-creations-5sI6fQgYIuo.png",
        mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png",
    )
@pytest.mark.parametrize("sd_device", ["cuda", "mps"])
@pytest.mark.parametrize("sampler", [SDSampler.uni_pc])
def test_controlnet_switch(sd_device, sampler):
    """Load with one ControlNet method, then request another via the config."""
    if sd_device == "cuda" and not torch.cuda.is_available():
        return
    # Fix: was ``device == "mps"`` — the module-level torch.device is never
    # "mps", so unavailable-MPS runs were not skipped.
    if sd_device == "mps" and not torch.backends.mps.is_available():
        return
    sd_steps = 1 if sd_device == "cpu" else 30
    model = ModelManager(
        name="sd1.5",
        sd_controlnet=True,
        device=torch.device(sd_device),
        hf_access_token="",
        sd_run_local=False,
        disable_nsfw=True,
        sd_cpu_textencoder=False,
        cpu_offload=True,
        sd_controlnet_method="control_v11p_sd15_canny",
    )
    cfg = get_config(
        HDStrategy.ORIGINAL,
        prompt="a fox sitting on a bench",
        sd_steps=sd_steps,
        controlnet_method="control_v11p_sd15_inpaint",
    )
    cfg.sd_sampler = sampler
    name = f"device_{sd_device}_{sampler}"
    assert_equal(
        model,
        cfg,
        f"sd_controlnet_switch_to_inpaint_local_model_{name}.png",
        img_p=current_dir / "overture-creations-5sI6fQgYIuo.png",
        mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png",
    )
| 6,219 | 30.734694 | 85 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/tests/test_plugins.py | import hashlib
import os
import time
from lama_cleaner.plugins.anime_seg import AnimeSeg
os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
from pathlib import Path
import cv2
import pytest
import torch.cuda
from lama_cleaner.plugins import (
RemoveBG,
RealESRGANUpscaler,
GFPGANPlugin,
RestoreFormerPlugin,
InteractiveSeg,
)
current_dir = Path(__file__).parent.absolute().resolve()
save_dir = current_dir / "result"
save_dir.mkdir(exist_ok=True, parents=True)
img_p = current_dir / "bunny.jpeg"
# Path.read_bytes() opens and closes the file; the previous bare
# open(...).read() leaked the file handle.
img_bytes = img_p.read_bytes()
bgr_img = cv2.imread(str(img_p))
rgb_img = cv2.cvtColor(bgr_img, cv2.COLOR_BGR2RGB)
def _save(img, name):
    """Persist a result image into the shared ``save_dir`` folder."""
    out_path = save_dir / name
    cv2.imwrite(str(out_path), img)
def test_remove_bg():
    # Background removal returns RGBA; convert back to BGRA for cv2.imwrite.
    plugin = RemoveBG()
    result = plugin.forward(bgr_img)
    result = cv2.cvtColor(result, cv2.COLOR_RGBA2BGRA)
    _save(result, "test_remove_bg.png")
def test_anime_seg():
    plugin = AnimeSeg()
    anime_img = cv2.imread(str(current_dir / "anime_test.png"))
    result = plugin.forward(anime_img)
    # Output should be a 4-channel (alpha-matted) image.
    assert len(result.shape) == 3
    assert result.shape[-1] == 4
    _save(result, "test_anime_seg.png")
@pytest.mark.parametrize("device", ["cuda", "cpu", "mps"])
def test_upscale(device):
    if device == "cuda" and not torch.cuda.is_available():
        return
    if device == "mps" and not torch.backends.mps.is_available():
        return
    upscaler = RealESRGANUpscaler("realesr-general-x4v3", device)
    # Run the same model at 2x and then 4x.
    for scale in (2, 4):
        result = upscaler.forward(bgr_img, scale)
        _save(result, f"test_upscale_x{scale}_{device}.png")
@pytest.mark.parametrize("device", ["cuda", "cpu", "mps"])
def test_gfpgan(device):
    if device == "cuda" and not torch.cuda.is_available():
        return
    if device == "mps" and not torch.backends.mps.is_available():
        return
    plugin = GFPGANPlugin(device)
    restored = plugin(rgb_img, None, None)
    _save(restored, f"test_gfpgan_{device}.png")
@pytest.mark.parametrize("device", ["cuda", "cpu", "mps"])
def test_restoreformer(device):
    if device == "cuda" and not torch.cuda.is_available():
        return
    if device == "mps" and not torch.backends.mps.is_available():
        return
    plugin = RestoreFormerPlugin(device)
    restored = plugin(rgb_img, None, None)
    _save(restored, f"test_restoreformer_{device}.png")
@pytest.mark.parametrize("device", ["cuda", "cpu", "mps"])
def test_segment_anything(device):
    if device == "cuda" and not torch.cuda.is_available():
        return
    if device == "mps" and not torch.backends.mps.is_available():
        return
    md5 = hashlib.md5(img_bytes).hexdigest()
    seg_model = InteractiveSeg("vit_l", device)
    click = [[448 // 2, 394 // 2, 1]]
    mask = seg_model.forward(rgb_img, click, md5)
    out_name = f"test_segment_anything_{device}.png"
    _save(mask, out_name)
    # The second call with the same md5 should reuse the cached embedding.
    tic = time.time()
    seg_model.forward(rgb_img, click, md5)
    print(f"Time for {out_name}: {time.time() - tic:.2f}s")
| 2,965 | 27.519231 | 73 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/tests/test_model_md5.py | def test_load_model():
    # Smoke test: each non-diffusion inpainting model can be constructed on CPU.
    from lama_cleaner.plugins import InteractiveSeg
    from lama_cleaner.model_manager import ModelManager
    interactive_seg_model = InteractiveSeg('vit_l', 'cpu')
    models = [
        "lama",
        "ldm",
        "zits",
        "mat",
        "fcf",
        "manga",
    ]
    for m in models:
        # local_files_only=True: construction must succeed from cached weights
        # without triggering a download.
        ModelManager(
            name=m,
            device="cpu",
            no_half=False,
            hf_access_token="",
            disable_nsfw=False,
            sd_cpu_textencoder=True,
            sd_run_local=True,
            local_files_only=True,
            cpu_offload=True,
        )
# def create_empty_file(tmp_dir, name):
# tmp_model_dir = os.path.join(tmp_dir, "torch", "hub", "checkpoints")
# Path(tmp_model_dir).mkdir(exist_ok=True, parents=True)
# path = os.path.join(tmp_model_dir, name)
# with open(path, "w") as f:
# f.write("1")
#
#
# def test_load_model_error():
# MODELS = [
# ("big-lama.pt", "e3aa4aaa15225a33ec84f9f4bc47e500"),
# ("cond_stage_model_encode.pt", "23239fc9081956a3e70de56472b3f296"),
# ("cond_stage_model_decode.pt", "fe419cd15a750d37a4733589d0d3585c"),
# ("diffusion.pt", "b0afda12bf790c03aba2a7431f11d22d"),
# ]
# with tempfile.TemporaryDirectory() as tmp_dir:
# os.environ["XDG_CACHE_HOME"] = tmp_dir
# for name, md5 in MODELS:
# create_empty_file(tmp_dir, name)
# test_load_model()
| 1,505 | 29.12 | 77 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/tests/test_sd_model.py | import os
os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
from pathlib import Path
import pytest
import torch
from lama_cleaner.model_manager import ModelManager
from lama_cleaner.schema import HDStrategy, SDSampler
from lama_cleaner.tests.test_model import get_config, assert_equal
current_dir = Path(__file__).parent.absolute().resolve()
save_dir = current_dir / "result"
save_dir.mkdir(exist_ok=True, parents=True)
device = "cuda" if torch.cuda.is_available() else "cpu"
device = torch.device(device)
@pytest.mark.parametrize("sd_device", ["cuda"])
@pytest.mark.parametrize("strategy", [HDStrategy.ORIGINAL])
@pytest.mark.parametrize("sampler", [SDSampler.ddim])
@pytest.mark.parametrize("cpu_textencoder", [True, False])
@pytest.mark.parametrize("disable_nsfw", [True, False])
def test_runway_sd_1_5_ddim(
    sd_device, strategy, sampler, cpu_textencoder, disable_nsfw
):
    # No-op progress callback; only checks the callback path does not crash.
    def callback(i, t, latents):
        pass
    if sd_device == "cuda" and not torch.cuda.is_available():
        return
    steps = 50 if sd_device == "cuda" else 1
    sd_model = ModelManager(
        name="sd1.5",
        device=torch.device(sd_device),
        hf_access_token="",
        sd_run_local=True,
        disable_nsfw=disable_nsfw,
        sd_cpu_textencoder=cpu_textencoder,
        callback=callback,
    )
    config = get_config(strategy, prompt="a fox sitting on a bench", sd_steps=steps)
    config.sd_sampler = sampler
    tag = f"device_{sd_device}_{sampler}_cpu_textencoder_{cpu_textencoder}_disnsfw_{disable_nsfw}"
    assert_equal(
        sd_model,
        config,
        f"runway_sd_{strategy.capitalize()}_{tag}.png",
        img_p=current_dir / "overture-creations-5sI6fQgYIuo.png",
        mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png",
        fx=1.3,
    )
@pytest.mark.parametrize("sd_device", ["cuda"])
@pytest.mark.parametrize("strategy", [HDStrategy.ORIGINAL])
@pytest.mark.parametrize(
    "sampler", [SDSampler.pndm, SDSampler.k_lms, SDSampler.k_euler, SDSampler.k_euler_a]
)
@pytest.mark.parametrize("cpu_textencoder", [False])
@pytest.mark.parametrize("disable_nsfw", [True])
def test_runway_sd_1_5(sd_device, strategy, sampler, cpu_textencoder, disable_nsfw):
    """SD 1.5 inpainting smoke test across the non-DDIM samplers."""
    def callback(i, t, latents):
        # Print progress so long GPU runs show liveness in the test log.
        print(f"sd_step_{i}")
    if sd_device == "cuda" and not torch.cuda.is_available():
        # Silently skip on machines without a GPU.
        return
    sd_steps = 50 if sd_device == "cuda" else 1  # keep non-GPU runs cheap
    model = ModelManager(
        name="sd1.5",
        device=torch.device(sd_device),
        hf_access_token="",
        sd_run_local=True,
        disable_nsfw=disable_nsfw,
        sd_cpu_textencoder=cpu_textencoder,
        callback=callback,
    )
    cfg = get_config(strategy, prompt="a fox sitting on a bench", sd_steps=sd_steps)
    cfg.sd_sampler = sampler
    name = f"device_{sd_device}_{sampler}_cpu_textencoder_{cpu_textencoder}_disnsfw_{disable_nsfw}"
    assert_equal(
        model,
        cfg,
        f"runway_sd_{strategy.capitalize()}_{name}.png",
        img_p=current_dir / "overture-creations-5sI6fQgYIuo.png",
        mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png",
        fx=1.3,
    )
@pytest.mark.parametrize("sd_device", ["cuda"])
@pytest.mark.parametrize("strategy", [HDStrategy.ORIGINAL])
@pytest.mark.parametrize("sampler", [SDSampler.ddim])
def test_runway_sd_1_5_negative_prompt(sd_device, strategy, sampler):
    """SD 1.5 inpainting with a negative prompt and histogram matching enabled."""
    def callback(i, t, latents):
        # Per-step diffusion callback; intentionally a no-op in this test.
        pass
    if sd_device == "cuda" and not torch.cuda.is_available():
        # Silently skip on machines without a GPU.
        return
    sd_steps = 50 if sd_device == "cuda" else 1  # keep non-GPU runs cheap
    model = ModelManager(
        name="sd1.5",
        device=torch.device(sd_device),
        hf_access_token="",
        sd_run_local=True,
        disable_nsfw=False,
        sd_cpu_textencoder=False,
        callback=callback,
    )
    cfg = get_config(
        strategy,
        sd_steps=sd_steps,
        prompt="Face of a fox, high resolution, sitting on a park bench",
        negative_prompt="orange, yellow, small",
        sd_sampler=sampler,
        sd_match_histograms=True,
    )
    name = f"{sampler}_negative_prompt"
    assert_equal(
        model,
        cfg,
        f"runway_sd_{strategy.capitalize()}_{name}.png",
        img_p=current_dir / "overture-creations-5sI6fQgYIuo.png",
        mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png",
        fx=1,
    )
@pytest.mark.parametrize("sd_device", ["cuda"])
@pytest.mark.parametrize("strategy", [HDStrategy.ORIGINAL])
@pytest.mark.parametrize("sampler", [SDSampler.k_euler_a])
@pytest.mark.parametrize("cpu_textencoder", [False])
@pytest.mark.parametrize("disable_nsfw", [False])
def test_runway_sd_1_5_sd_scale(
    sd_device, strategy, sampler, cpu_textencoder, disable_nsfw
):
    """SD 1.5 inpainting with sd_scale=0.85 (runs diffusion at reduced
    resolution, then upsamples — see DiffusionInpaintModel)."""
    if sd_device == "cuda" and not torch.cuda.is_available():
        # Silently skip on machines without a GPU.
        return
    sd_steps = 50 if sd_device == "cuda" else 1  # keep non-GPU runs cheap
    model = ModelManager(
        name="sd1.5",
        device=torch.device(sd_device),
        hf_access_token="",
        sd_run_local=True,
        disable_nsfw=disable_nsfw,
        sd_cpu_textencoder=cpu_textencoder,
    )
    cfg = get_config(
        strategy, prompt="a fox sitting on a bench", sd_steps=sd_steps, sd_scale=0.85
    )
    cfg.sd_sampler = sampler
    name = f"device_{sd_device}_{sampler}_cpu_textencoder_{cpu_textencoder}_disnsfw_{disable_nsfw}"
    assert_equal(
        model,
        cfg,
        f"runway_sd_{strategy.capitalize()}_{name}_sdscale.png",
        img_p=current_dir / "overture-creations-5sI6fQgYIuo.png",
        mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png",
        fx=1.3,
    )
@pytest.mark.parametrize("sd_device", ["cuda"])
@pytest.mark.parametrize("strategy", [HDStrategy.ORIGINAL])
@pytest.mark.parametrize("sampler", [SDSampler.k_euler_a])
def test_runway_sd_1_5_cpu_offload(sd_device, strategy, sampler):
    """SD 1.5 inpainting with sequential CPU offload enabled (low-VRAM path)."""
    if sd_device == "cuda" and not torch.cuda.is_available():
        # Silently skip on machines without a GPU.
        return
    sd_steps = 50 if sd_device == "cuda" else 1  # keep non-GPU runs cheap
    model = ModelManager(
        name="sd1.5",
        device=torch.device(sd_device),
        hf_access_token="",
        sd_run_local=True,
        disable_nsfw=True,
        sd_cpu_textencoder=False,
        cpu_offload=True,
    )
    cfg = get_config(
        strategy, prompt="a fox sitting on a bench", sd_steps=sd_steps, sd_scale=0.85
    )
    cfg.sd_sampler = sampler
    name = f"device_{sd_device}_{sampler}"
    assert_equal(
        model,
        cfg,
        f"runway_sd_{strategy.capitalize()}_{name}_cpu_offload.png",
        img_p=current_dir / "overture-creations-5sI6fQgYIuo.png",
        mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png",
    )
@pytest.mark.parametrize("sd_device", ["cuda", "mps"])
@pytest.mark.parametrize("sampler", [SDSampler.uni_pc])
def test_local_file_path(sd_device, sampler):
    """Load SD 1.5 from a local single-file checkpoint instead of the hub."""
    if sd_device == "cuda" and not torch.cuda.is_available():
        # Silently skip on machines without a GPU.
        return
    # NOTE(review): unlike the other tests in this file, the check is against
    # "cpu" (never parametrized here), so an mps run gets 50 steps — confirm
    # that is intended.
    sd_steps = 1 if sd_device == "cpu" else 50
    model = ModelManager(
        name="sd1.5",
        device=torch.device(sd_device),
        hf_access_token="",
        sd_run_local=True,
        disable_nsfw=True,
        sd_cpu_textencoder=False,
        cpu_offload=True,
        # NOTE(review): hard-coded developer-machine path; this test can only
        # pass on hosts where the checkpoint exists at this location.
        sd_local_model_path="/Users/cwq/data/models/sd-v1-5-inpainting.ckpt",
    )
    cfg = get_config(
        HDStrategy.ORIGINAL,
        prompt="a fox sitting on a bench",
        sd_steps=sd_steps,
    )
    cfg.sd_sampler = sampler
    name = f"device_{sd_device}_{sampler}"
    assert_equal(
        model,
        cfg,
        f"sd_local_model_{name}.png",
        img_p=current_dir / "overture-creations-5sI6fQgYIuo.png",
        mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png",
    )
| 7,647 | 30.603306 | 99 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/tests/__init__.py | 0 | 0 | 0 | py | |
lama-cleaner | lama-cleaner-main/lama_cleaner/tests/test_model.py | from pathlib import Path
import cv2
import pytest
import torch
from lama_cleaner.model_manager import ModelManager
from lama_cleaner.schema import Config, HDStrategy, LDMSampler, SDSampler
# Directory of this test file; generated images are written to ./result
current_dir = Path(__file__).parent.absolute().resolve()
save_dir = current_dir / "result"
save_dir.mkdir(exist_ok=True, parents=True)
# All non-SD model tests in this file share this device.
device = "cuda" if torch.cuda.is_available() else "cpu"
device = torch.device(device)
def get_data(
    fx: float = 1,
    fy: float = 1.0,
    img_p=current_dir / "image.png",
    mask_p=current_dir / "mask.png",
):
    """Load and resize a test image/mask pair.

    Args:
        fx, fy: resize scale factors applied to both image and mask.
        img_p: path of the test image.
        mask_p: path of the single-channel mask image.

    Returns:
        (img, mask): img as an RGB uint8 array, mask as a grayscale uint8 array.
    """
    img = cv2.imread(str(img_p))
    # cv2.imread with the default IMREAD_COLOR flag always returns a 3-channel
    # BGR image (any alpha channel is dropped), so the correct conversion is
    # BGR -> RGB. The previous COLOR_BGRA2RGB code requires 4-channel input
    # and raises cv2.error on the 3-channel array imread produces.
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    mask = cv2.imread(str(mask_p), cv2.IMREAD_GRAYSCALE)
    # INTER_AREA for image quality when scaling; INTER_NEAREST keeps the mask binary.
    img = cv2.resize(img, None, fx=fx, fy=fy, interpolation=cv2.INTER_AREA)
    mask = cv2.resize(mask, None, fx=fx, fy=fy, interpolation=cv2.INTER_NEAREST)
    return img, mask
def get_config(strategy, **kwargs):
    """Build a Config with fast test defaults; any kwarg overrides a default."""
    defaults = {
        "ldm_steps": 1,
        "ldm_sampler": LDMSampler.plms,
        "hd_strategy": strategy,
        "hd_strategy_crop_margin": 32,
        "hd_strategy_crop_trigger_size": 200,
        "hd_strategy_resize_limit": 200,
    }
    defaults.update(kwargs)
    return Config(**defaults)
def assert_equal(
    model,
    config,
    gt_name,
    fx: float = 1,
    fy: float = 1,
    img_p=current_dir / "image.png",
    mask_p=current_dir / "mask.png",
):
    """Run `model` on the (optionally resized) image/mask pair and write the
    result to ./result/gt_name. Pixel-exact comparison against a golden file
    is currently disabled (see the commented-out asserts at the end).
    """
    img, mask = get_data(fx=fx, fy=fy, img_p=img_p, mask_p=mask_p)
    print(f"Input image shape: {img.shape}")
    res = model(img, mask, config)
    # Save as losslessly as the format allows: max JPEG quality, zero PNG compression.
    cv2.imwrite(
        str(save_dir / gt_name),
        res,
        [int(cv2.IMWRITE_JPEG_QUALITY), 100, int(cv2.IMWRITE_PNG_COMPRESSION), 0],
    )
    """
    Note that JPEG is lossy compression, so even if it is the highest quality 100,
    when the saved images is reloaded, a difference occurs with the original pixel value.
    If you want to save the original images as it is, save it as PNG or BMP.
    """
    # gt = cv2.imread(str(current_dir / gt_name), cv2.IMREAD_UNCHANGED)
    # assert np.array_equal(res, gt)
@pytest.mark.parametrize(
    "strategy", [HDStrategy.ORIGINAL, HDStrategy.RESIZE, HDStrategy.CROP]
)
def test_lama(strategy):
    """Run LaMa under every HD strategy, at 1x and 1.3x input scale."""
    model = ModelManager(name="lama", device=device)
    assert_equal(
        model,
        get_config(strategy),
        f"lama_{strategy[0].upper() + strategy[1:]}_result.png",
    )
    fx = 1.3
    assert_equal(
        model,
        get_config(strategy),
        f"lama_{strategy[0].upper() + strategy[1:]}_fx_{fx}_result.png",
        fx=1.3,
    )
@pytest.mark.parametrize(
    "strategy", [HDStrategy.ORIGINAL, HDStrategy.RESIZE, HDStrategy.CROP]
)
@pytest.mark.parametrize("ldm_sampler", [LDMSampler.ddim, LDMSampler.plms])
def test_ldm(strategy, ldm_sampler):
    """Run the LDM model under every HD strategy and both LDM samplers."""
    model = ModelManager(name="ldm", device=device)
    cfg = get_config(strategy, ldm_sampler=ldm_sampler)
    assert_equal(
        model, cfg, f"ldm_{strategy[0].upper() + strategy[1:]}_{ldm_sampler}_result.png"
    )
    fx = 1.3
    assert_equal(
        model,
        cfg,
        f"ldm_{strategy[0].upper() + strategy[1:]}_{ldm_sampler}_fx_{fx}_result.png",
        fx=fx,
    )
@pytest.mark.parametrize(
    "strategy", [HDStrategy.ORIGINAL, HDStrategy.RESIZE, HDStrategy.CROP]
)
@pytest.mark.parametrize("zits_wireframe", [False, True])
def test_zits(strategy, zits_wireframe):
    """Run ZITS under every HD strategy, with and without wireframe guidance."""
    model = ModelManager(name="zits", device=device)
    cfg = get_config(strategy, zits_wireframe=zits_wireframe)
    # os.environ['ZITS_DEBUG_LINE_PATH'] = str(current_dir / 'zits_debug_line.jpg')
    # os.environ['ZITS_DEBUG_EDGE_PATH'] = str(current_dir / 'zits_debug_edge.jpg')
    assert_equal(
        model,
        cfg,
        f"zits_{strategy[0].upper() + strategy[1:]}_wireframe_{zits_wireframe}_result.png",
    )
    fx = 1.3
    assert_equal(
        model,
        cfg,
        f"zits_{strategy.capitalize()}_wireframe_{zits_wireframe}_fx_{fx}_result.png",
        fx=fx,
    )
@pytest.mark.parametrize("strategy", [HDStrategy.ORIGINAL])
@pytest.mark.parametrize("no_half", [True, False])
def test_mat(strategy, no_half):
    """Run MAT in fp16 and fp32 modes."""
    model = ModelManager(name="mat", device=device, no_half=no_half)
    cfg = get_config(strategy)
    # NOTE(review): repeated 10x — presumably to check stability across
    # repeated invocations of the same model; confirm the intent.
    for _ in range(10):
        assert_equal(
            model,
            cfg,
            f"mat_{strategy.capitalize()}_result.png",
        )
@pytest.mark.parametrize("strategy", [HDStrategy.ORIGINAL])
def test_fcf(strategy):
    """Run FcF at two input scales (it requires fixed-size square-ish inputs)."""
    model = ModelManager(name="fcf", device=device)
    cfg = get_config(strategy)
    # NOTE(review): both calls save to the same filename, so the second run
    # overwrites the first result image — confirm whether distinct names
    # (e.g. including fx/fy) were intended.
    assert_equal(model, cfg, f"fcf_{strategy.capitalize()}_result.png", fx=2, fy=2)
    assert_equal(model, cfg, f"fcf_{strategy.capitalize()}_result.png", fx=3.8, fy=2)
@pytest.mark.parametrize(
    "strategy", [HDStrategy.ORIGINAL, HDStrategy.RESIZE, HDStrategy.CROP]
)
@pytest.mark.parametrize("cv2_flag", ["INPAINT_NS", "INPAINT_TELEA"])
@pytest.mark.parametrize("cv2_radius", [3, 15])
def test_cv2(strategy, cv2_flag, cv2_radius):
    """Run the classical cv2 inpainting backend over both algorithms and radii."""
    model = ModelManager(
        name="cv2",
        device=torch.device(device),
    )
    cfg = get_config(strategy, cv2_flag=cv2_flag, cv2_radius=cv2_radius)
    assert_equal(
        model,
        cfg,
        f"sd_{strategy.capitalize()}_{cv2_flag}_{cv2_radius}.png",
        img_p=current_dir / "overture-creations-5sI6fQgYIuo.png",
        mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png",
    )
@pytest.mark.parametrize(
    "strategy", [HDStrategy.ORIGINAL, HDStrategy.RESIZE, HDStrategy.CROP]
)
def test_manga(strategy):
    """Run the manga inpainting model under every HD strategy."""
    model = ModelManager(
        name="manga",
        device=torch.device(device),
    )
    cfg = get_config(strategy)
    assert_equal(
        model,
        cfg,
        f"sd_{strategy.capitalize()}.png",
        img_p=current_dir / "overture-creations-5sI6fQgYIuo.png",
        mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png",
    )
| 5,826 | 28.882051 | 91 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/tests/test_save_exif.py | import io
from pathlib import Path
from PIL import Image
from lama_cleaner.helper import pil_to_bytes, load_img
# Directory containing this test file and its image fixtures.
current_dir = Path(__file__).parent.absolute().resolve()
def print_exif(exif):
    """Dump EXIF key/value pairs to stdout, one per line."""
    for key, value in exif.items():
        print(f"{key}: {value}")
def run_test(img_p: Path):
    """Round-trip `img_p` through load_img -> pil_to_bytes and assert that all
    EXIF tags and the `parameters` pnginfo entry survive re-encoding."""
    print(img_p)
    # File extension without the dot, e.g. "png" / "jpeg".
    ext = img_p.suffix.strip(".")
    img_bytes = img_p.read_bytes()
    np_img, _, exif_infos = load_img(img_bytes, False, True)
    print(exif_infos)
    print("Original exif_infos")
    print_exif(exif_infos["exif"])
    # First call verifies encoding also works with empty exif metadata.
    pil_to_bytes(Image.fromarray(np_img), ext=ext, exif_infos={})
    pil_bytes = pil_to_bytes(Image.fromarray(np_img), ext=ext, exif_infos=exif_infos)
    res_img = Image.open(io.BytesIO(pil_bytes))
    print(f"Result img info: {res_img.info}")
    res_exif = res_img.getexif()
    print_exif(res_exif)
    assert res_exif == exif_infos["exif"]
    assert exif_infos["parameters"] == res_img.info.get("parameters")
def test_png():
    """EXIF / pnginfo round-trip for PNG fixtures."""
    for filename in ("image.png", "pnginfo_test.png"):
        run_test(current_dir / filename)
def test_jpeg():
    """EXIF round-trip for a JPEG fixture."""
    run_test(current_dir / "bunny.jpeg")
| 1,128 | 24.659091 | 85 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/model/base.py | import abc
from typing import Optional
import cv2
import torch
import numpy as np
from loguru import logger
from lama_cleaner.helper import (
boxes_from_mask,
resize_max_size,
pad_img_to_modulo,
switch_mps_device,
)
from lama_cleaner.schema import Config, HDStrategy
class InpaintModel:
    """Abstract base class for every inpainting backend.

    Subclasses implement init_model() / forward() / is_downloaded();
    __call__ wraps forward() with padding plus the high-resolution
    CROP / RESIZE strategies selected via Config.hd_strategy.
    """
    name = "base"
    # Minimum side length fed to forward(); inputs are padded up to it.
    min_size: Optional[int] = None
    # forward() inputs are padded so H and W are multiples of this value.
    pad_mod = 8
    # Some networks require square inputs; subclasses opt in.
    pad_to_square = False
    def __init__(self, device, **kwargs):
        """
        Args:
            device: torch device to run on; may be remapped away from MPS for
                models that do not support it (see switch_mps_device).
        """
        device = switch_mps_device(self.name, device)
        self.device = device
        self.init_model(device, **kwargs)
    @abc.abstractmethod
    def init_model(self, device, **kwargs):
        # Load weights / build the network on `device`.
        ...
    @staticmethod
    @abc.abstractmethod
    def is_downloaded() -> bool:
        # True when the model weights already exist locally.
        ...
    @abc.abstractmethod
    def forward(self, image, mask, config: Config):
        """Input images and output images have same size
        images: [H, W, C] RGB
        masks: [H, W, 1] 255 marks the region to inpaint
        return: BGR IMAGE
        """
        ...
    def _pad_forward(self, image, mask, config: Config):
        # Pad H/W to a multiple of pad_mod (optionally square / min_size),
        # run forward(), then crop the result back to the original size.
        origin_height, origin_width = image.shape[:2]
        pad_image = pad_img_to_modulo(
            image, mod=self.pad_mod, square=self.pad_to_square, min_size=self.min_size
        )
        pad_mask = pad_img_to_modulo(
            mask, mod=self.pad_mod, square=self.pad_to_square, min_size=self.min_size
        )
        logger.info(f"final forward pad size: {pad_image.shape}")
        result = self.forward(pad_image, pad_mask, config)
        result = result[0:origin_height, 0:origin_width, :]
        result, image, mask = self.forward_post_process(result, image, mask, config)
        # Alpha-blend: model output inside the mask, original pixels outside.
        mask = mask[:, :, np.newaxis]
        result = result * (mask / 255) + image[:, :, ::-1] * (1 - (mask / 255))
        return result
    def forward_post_process(self, result, image, mask, config):
        # Hook for subclasses (e.g. histogram matching); identity by default.
        return result, image, mask
    @torch.no_grad()
    def __call__(self, image, mask, config: Config):
        """
        images: [H, W, C] RGB, not normalized
        masks: [H, W]
        return: BGR IMAGE
        """
        inpaint_result = None
        logger.info(f"hd_strategy: {config.hd_strategy}")
        if config.hd_strategy == HDStrategy.CROP:
            if max(image.shape) > config.hd_strategy_crop_trigger_size:
                logger.info(f"Run crop strategy")
                # Inpaint each masked region separately on a padded crop.
                boxes = boxes_from_mask(mask)
                crop_result = []
                for box in boxes:
                    crop_image, crop_box = self._run_box(image, mask, box, config)
                    crop_result.append((crop_image, crop_box))
                # NOTE(review): image[:, :, ::-1] is a numpy *view*; pasting
                # the crops into it below mutates the caller's `image` array
                # in place — confirm this is intended (a .copy() would avoid it).
                inpaint_result = image[:, :, ::-1]
                for crop_image, crop_box in crop_result:
                    x1, y1, x2, y2 = crop_box
                    inpaint_result[y1:y2, x1:x2, :] = crop_image
        elif config.hd_strategy == HDStrategy.RESIZE:
            if max(image.shape) > config.hd_strategy_resize_limit:
                # Inpaint at reduced resolution, then upsample and paste only
                # the masked area back onto the full-resolution image.
                origin_size = image.shape[:2]
                downsize_image = resize_max_size(
                    image, size_limit=config.hd_strategy_resize_limit
                )
                downsize_mask = resize_max_size(
                    mask, size_limit=config.hd_strategy_resize_limit
                )
                logger.info(
                    f"Run resize strategy, origin size: {image.shape} forward size: {downsize_image.shape}"
                )
                inpaint_result = self._pad_forward(
                    downsize_image, downsize_mask, config
                )
                # only paste masked area result
                inpaint_result = cv2.resize(
                    inpaint_result,
                    (origin_size[1], origin_size[0]),
                    interpolation=cv2.INTER_CUBIC,
                )
                original_pixel_indices = mask < 127
                inpaint_result[original_pixel_indices] = image[:, :, ::-1][
                    original_pixel_indices
                ]
        if inpaint_result is None:
            # ORIGINAL strategy, or a CROP/RESIZE trigger threshold not reached.
            inpaint_result = self._pad_forward(image, mask, config)
        return inpaint_result
    def _crop_box(self, image, mask, box, config: Config):
        """
        Crop around `box` with hd_strategy_crop_margin pixels of context on
        each side, clamped to the image bounds.
        Args:
            image: [H, W, C] RGB
            mask: [H, W, 1]
            box: [left,top,right,bottom]
        Returns:
            (crop_img, crop_mask, [l, t, r, b]) — the actual rectangle used.
        """
        box_h = box[3] - box[1]
        box_w = box[2] - box[0]
        cx = (box[0] + box[2]) // 2
        cy = (box[1] + box[3]) // 2
        img_h, img_w = image.shape[:2]
        w = box_w + config.hd_strategy_crop_margin * 2
        h = box_h + config.hd_strategy_crop_margin * 2
        _l = cx - w // 2
        _r = cx + w // 2
        _t = cy - h // 2
        _b = cy + h // 2
        l = max(_l, 0)
        r = min(_r, img_w)
        t = max(_t, 0)
        b = min(_b, img_h)
        # try to get more context when crop around image edge
        if _l < 0:
            r += abs(_l)
        if _r > img_w:
            l -= _r - img_w
        if _t < 0:
            b += abs(_t)
        if _b > img_h:
            t -= _b - img_h
        # Re-clamp after shifting so the rectangle stays inside the image.
        l = max(l, 0)
        r = min(r, img_w)
        t = max(t, 0)
        b = min(b, img_h)
        crop_img = image[t:b, l:r, :]
        crop_mask = mask[t:b, l:r]
        logger.info(f"box size: ({box_h},{box_w}) crop size: {crop_img.shape}")
        return crop_img, crop_mask, [l, t, r, b]
    def _calculate_cdf(self, histogram):
        # Cumulative distribution of a histogram, normalized to [0, 1].
        cdf = histogram.cumsum()
        normalized_cdf = cdf / float(cdf.max())
        return normalized_cdf
    def _calculate_lookup(self, source_cdf, reference_cdf):
        # For each source level, pick the first reference level whose CDF
        # reaches it. O(256^2) scan — negligible for 8-bit histograms.
        lookup_table = np.zeros(256)
        lookup_val = 0
        for source_index, source_val in enumerate(source_cdf):
            for reference_index, reference_val in enumerate(reference_cdf):
                if reference_val >= source_val:
                    lookup_val = reference_index
                    break
            lookup_table[source_index] = lookup_val
        return lookup_table
    def _match_histograms(self, source, reference, mask):
        # Per-channel histogram matching of `source` onto `reference`, using
        # statistics from the unmasked (mask == 0) pixels only.
        transformed_channels = []
        for channel in range(source.shape[-1]):
            source_channel = source[:, :, channel]
            reference_channel = reference[:, :, channel]
            # only calculate histograms for non-masked parts
            source_histogram, _ = np.histogram(source_channel[mask == 0], 256, [0, 256])
            reference_histogram, _ = np.histogram(
                reference_channel[mask == 0], 256, [0, 256]
            )
            source_cdf = self._calculate_cdf(source_histogram)
            reference_cdf = self._calculate_cdf(reference_histogram)
            lookup = self._calculate_lookup(source_cdf, reference_cdf)
            transformed_channels.append(cv2.LUT(source_channel, lookup))
        result = cv2.merge(transformed_channels)
        result = cv2.convertScaleAbs(result)
        return result
    def _apply_cropper(self, image, mask, config: Config):
        # Crop image/mask to the user-drawn cropper rectangle, clamped to the
        # image bounds.
        img_h, img_w = image.shape[:2]
        l, t, w, h = (
            config.croper_x,
            config.croper_y,
            config.croper_width,
            config.croper_height,
        )
        r = l + w
        b = t + h
        l = max(l, 0)
        r = min(r, img_w)
        t = max(t, 0)
        b = min(b, img_h)
        crop_img = image[t:b, l:r, :]
        crop_mask = mask[t:b, l:r]
        return crop_img, crop_mask, (l, t, r, b)
    def _run_box(self, image, mask, box, config: Config):
        """
        Inpaint a single masked box (with margin) through _pad_forward.
        Args:
            image: [H, W, C] RGB
            mask: [H, W, 1]
            box: [left,top,right,bottom]
        Returns:
            (BGR crop result, [l, t, r, b])
        """
        crop_img, crop_mask, [l, t, r, b] = self._crop_box(image, mask, box, config)
        return self._pad_forward(crop_img, crop_mask, config), [l, t, r, b]
class DiffusionInpaintModel(InpaintModel):
    """Base class for diffusion-based inpainting backends.

    Adds user-cropper support and sd_scale-aware resizing on top of
    InpaintModel's padding/blending machinery.
    """
    @torch.no_grad()
    def __call__(self, image, mask, config: Config):
        """
        images: [H, W, C] RGB, not normalized
        masks: [H, W]
        return: BGR IMAGE
        """
        if config.use_croper:
            crop_img, crop_mask, (l, t, r, b) = self._apply_cropper(image, mask, config)
            crop_image = self._scaled_pad_forward(crop_img, crop_mask, config)
            # Copy before pasting: image[:, :, ::-1] is a numpy *view* of the
            # caller's array, and writing the crop into that view would mutate
            # the input image in place. The returned values are unchanged.
            inpaint_result = image[:, :, ::-1].copy()
            inpaint_result[t:b, l:r, :] = crop_image
        else:
            inpaint_result = self._scaled_pad_forward(image, mask, config)
        return inpaint_result
    def _scaled_pad_forward(self, image, mask, config: Config):
        """Run _pad_forward at sd_scale resolution, then resize the result back
        and paste only the masked area onto the original-resolution image.
        """
        longer_side_length = int(config.sd_scale * max(image.shape[:2]))
        origin_size = image.shape[:2]
        downsize_image = resize_max_size(image, size_limit=longer_side_length)
        downsize_mask = resize_max_size(mask, size_limit=longer_side_length)
        if config.sd_scale != 1:
            logger.info(
                f"Resize image to do sd inpainting: {image.shape} -> {downsize_image.shape}"
            )
        inpaint_result = self._pad_forward(downsize_image, downsize_mask, config)
        # only paste masked area result
        inpaint_result = cv2.resize(
            inpaint_result,
            (origin_size[1], origin_size[0]),
            interpolation=cv2.INTER_CUBIC,
        )
        original_pixel_indices = mask < 127
        inpaint_result[original_pixel_indices] = image[:, :, ::-1][
            original_pixel_indices
        ]
        return inpaint_result
| 9,600 | 31.110368 | 107 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/model/opencv2.py | import cv2
from lama_cleaner.model.base import InpaintModel
from lama_cleaner.schema import Config
# Map Config.cv2_flag strings to OpenCV inpainting algorithm constants.
flag_map = {"INPAINT_NS": cv2.INPAINT_NS, "INPAINT_TELEA": cv2.INPAINT_TELEA}
class OpenCV2(InpaintModel):
    """Classical (non-learning) inpainting backed by cv2.inpaint."""
    name = "cv2"
    pad_mod = 1  # cv2.inpaint has no size constraints, so no padding needed

    @staticmethod
    def is_downloaded() -> bool:
        # Ships with opencv itself; there is nothing to download.
        return True

    def forward(self, image, mask, config: Config):
        """Input image and output image have same size
        image: [H, W, C] RGB
        mask: [H, W, 1]
        return: BGR IMAGE
        """
        bgr_image = image[:, :, ::-1]
        radius = config.cv2_radius
        algorithm = flag_map[config.cv2_flag]
        return cv2.inpaint(bgr_image, mask, inpaintRadius=radius, flags=algorithm)
| 716 | 23.724138 | 77 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/model/lama.py | import os
import cv2
import numpy as np
import torch
from lama_cleaner.helper import (
norm_img,
get_cache_path_by_url,
load_jit_model,
)
from lama_cleaner.model.base import InpaintModel
from lama_cleaner.schema import Config
# TorchScript checkpoint location; both URL and MD5 can be overridden via
# environment variables for mirrors / pinned downloads.
LAMA_MODEL_URL = os.environ.get(
    "LAMA_MODEL_URL",
    "https://github.com/Sanster/models/releases/download/add_big_lama/big-lama.pt",
)
LAMA_MODEL_MD5 = os.environ.get("LAMA_MODEL_MD5", "e3aa4aaa15225a33ec84f9f4bc47e500")
class LaMa(InpaintModel):
    """LaMa inpainting model, loaded as a TorchScript JIT module."""
    name = "lama"
    pad_mod = 8

    def init_model(self, device, **kwargs):
        # Download (if needed), verify and load the scripted big-lama checkpoint.
        self.model = load_jit_model(LAMA_MODEL_URL, device, LAMA_MODEL_MD5).eval()

    @staticmethod
    def is_downloaded() -> bool:
        return os.path.exists(get_cache_path_by_url(LAMA_MODEL_URL))

    def forward(self, image, mask, config: Config):
        """Input image and output image have same size
        image: [H, W, C] RGB
        mask: [H, W]
        return: BGR IMAGE
        """
        norm_image = norm_img(image)
        norm_mask = norm_img(mask)
        norm_mask = (norm_mask > 0) * 1  # binarize: any positive value is a hole

        image_t = torch.from_numpy(norm_image).unsqueeze(0).to(self.device)
        mask_t = torch.from_numpy(norm_mask).unsqueeze(0).to(self.device)

        inpainted = self.model(image_t, mask_t)
        result = inpainted[0].permute(1, 2, 0).detach().cpu().numpy()
        result = np.clip(result * 255, 0, 255).astype("uint8")
        return cv2.cvtColor(result, cv2.COLOR_RGB2BGR)
| 1,480 | 27.480769 | 85 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/model/controlnet.py | import gc
import PIL.Image
import cv2
import numpy as np
import torch
from diffusers import ControlNetModel
from loguru import logger
from lama_cleaner.model.base import DiffusionInpaintModel
from lama_cleaner.model.utils import torch_gc, get_scheduler
from lama_cleaner.schema import Config
class CPUTextEncoderWrapper:
    """Runs a diffusers text encoder on the CPU in fp32 while the rest of the
    pipeline stays on its original device/dtype."""

    def __init__(self, text_encoder, torch_dtype):
        self.config = text_encoder.config
        # Move to CPU and widen to fp32 (two steps, mirroring the original
        # half-precision -> full-precision migration).
        self.text_encoder = text_encoder.to(torch.device("cpu"), non_blocking=True)
        self.text_encoder = self.text_encoder.to(torch.float32, non_blocking=True)
        self.torch_dtype = torch_dtype
        del text_encoder
        torch_gc()

    def __call__(self, x, **kwargs):
        input_device = x.device
        # Encode on CPU, then hand the embeddings back on the caller's device
        # and dtype, wrapped in a list like the HF encoder output.
        embeddings = self.text_encoder(x.to(self.text_encoder.device), **kwargs)[0]
        return [embeddings.to(input_device).to(self.torch_dtype)]

    @property
    def dtype(self):
        return self.torch_dtype
# Inpainting-ready SD checkpoints, used with the controlnet inpainting pipeline.
NAMES_MAP = {
    "sd1.5": "runwayml/stable-diffusion-inpainting",
    "anything4": "Sanster/anything-4.0-inpainting",
    "realisticVision1.4": "Sanster/Realistic_Vision_V1.4-inpainting",
}
# Plain (non-inpainting) SD checkpoints, used with control_v11p_sd15_inpaint.
NATIVE_NAMES_MAP = {
    "sd1.5": "runwayml/stable-diffusion-v1-5",
    "anything4": "andite/anything-v4.0",
    "realisticVision1.4": "SG161222/Realistic_Vision_V1.4",
}
def make_inpaint_condition(image, image_mask):
    """
    image: [H, W, C] RGB
    mask: [H, W, 1] 255 means area to repaint

    Returns a [1, C, H, W] float32 tensor scaled to [0, 1] with masked pixels
    set to -1.0 (the sentinel the native inpaint ControlNet expects).
    """
    condition = image.astype(np.float32) / 255.0
    hole = image_mask[:, :, -1] > 128
    condition[hole] = -1.0  # set as masked pixel
    condition = condition.transpose(2, 0, 1)[np.newaxis, ...]  # HWC -> NCHW
    return torch.from_numpy(condition)
def load_from_local_model(
    local_model_path, torch_dtype, controlnet, pipe_class, is_native_control_inpaint
):
    """Convert a single-file SD checkpoint (.ckpt / .safetensors) into a
    diffusers pipeline of type `pipe_class` with `controlnet` attached.
    Exits the process with -1 if conversion fails.
    """
    from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
        download_from_original_stable_diffusion_ckpt,
    )
    logger.info(f"Converting {local_model_path} to diffusers controlnet pipeline")
    try:
        # Native control-inpaint drives a normal 4-channel UNet; the dedicated
        # controlnet inpainting pipeline needs the 9-channel inpainting UNet.
        pipe = download_from_original_stable_diffusion_ckpt(
            local_model_path,
            num_in_channels=4 if is_native_control_inpaint else 9,
            from_safetensors=local_model_path.endswith("safetensors"),
            device="cpu",
            load_safety_checker=False,
        )
    except Exception as e:
        err_msg = str(e)
        logger.exception(e)
        # A conv-in weight shape mismatch means the user picked the wrong kind
        # of checkpoint for the selected controlnet method.
        if is_native_control_inpaint and "[320, 9, 3, 3]" in err_msg:
            logger.error(
                "control_v11p_sd15_inpaint method requires normal SD model, not inpainting SD model"
            )
        if not is_native_control_inpaint and "[320, 4, 3, 3]" in err_msg:
            logger.error(
                f"{controlnet.config['_name_or_path']} method requires inpainting SD model, "
                f"you can convert any SD model to inpainting model in AUTO1111: \n"
                f"https://www.reddit.com/r/StableDiffusion/comments/zyi24j/how_to_turn_any_model_into_an_inpainting_model/"
            )
        exit(-1)
    # Reassemble the converted components around the supplied controlnet.
    inpaint_pipe = pipe_class(
        vae=pipe.vae,
        text_encoder=pipe.text_encoder,
        tokenizer=pipe.tokenizer,
        unet=pipe.unet,
        controlnet=controlnet,
        scheduler=pipe.scheduler,
        safety_checker=None,
        feature_extractor=None,
        requires_safety_checker=False,
    )
    # Drop the intermediate pipeline before casting to the target dtype.
    del pipe
    gc.collect()
    return inpaint_pipe.to(torch_dtype=torch_dtype)
class ControlNet(DiffusionInpaintModel):
    """Stable Diffusion inpainting conditioned by a ControlNet
    (canny / openpose / depth, or the native control_v11p_sd15_inpaint)."""
    name = "controlnet"
    pad_mod = 8
    min_size = 512
    def init_model(self, device: torch.device, **kwargs):
        """Build the diffusers pipeline from the hub or a local checkpoint,
        honoring no_half / disable_nsfw / cpu_offload / sd_cpu_textencoder /
        enable_xformers options passed through kwargs."""
        fp16 = not kwargs.get("no_half", False)
        model_kwargs = {
            "local_files_only": kwargs.get("local_files_only", kwargs["sd_run_local"])
        }
        if kwargs["disable_nsfw"] or kwargs.get("cpu_offload", False):
            logger.info("Disable Stable Diffusion Model NSFW checker")
            model_kwargs.update(
                dict(
                    safety_checker=None,
                    feature_extractor=None,
                    requires_safety_checker=False,
                )
            )
        use_gpu = device == torch.device("cuda") and torch.cuda.is_available()
        torch_dtype = torch.float16 if use_gpu and fp16 else torch.float32
        sd_controlnet_method = kwargs["sd_controlnet_method"]
        self.sd_controlnet_method = sd_controlnet_method
        # The native inpaint controlnet drives a plain SD pipeline; every other
        # method uses the dedicated controlnet inpainting pipeline.
        if sd_controlnet_method == "control_v11p_sd15_inpaint":
            from diffusers import StableDiffusionControlNetPipeline as PipeClass
            self.is_native_control_inpaint = True
        else:
            from .pipeline import StableDiffusionControlNetInpaintPipeline as PipeClass
            self.is_native_control_inpaint = False
        if self.is_native_control_inpaint:
            model_id = NATIVE_NAMES_MAP[kwargs["name"]]
        else:
            model_id = NAMES_MAP[kwargs["name"]]
        controlnet = ControlNetModel.from_pretrained(
            f"lllyasviel/{sd_controlnet_method}", torch_dtype=torch_dtype
        )
        self.is_local_sd_model = False
        if kwargs.get("sd_local_model_path", None):
            self.is_local_sd_model = True
            self.model = load_from_local_model(
                kwargs["sd_local_model_path"],
                torch_dtype=torch_dtype,
                controlnet=controlnet,
                pipe_class=PipeClass,
                is_native_control_inpaint=self.is_native_control_inpaint,
            )
        else:
            self.model = PipeClass.from_pretrained(
                model_id,
                controlnet=controlnet,
                revision="fp16" if use_gpu and fp16 else "main",
                torch_dtype=torch_dtype,
                **model_kwargs,
            )
        # https://huggingface.co/docs/diffusers/v0.7.0/en/api/pipelines/stable_diffusion#diffusers.StableDiffusionInpaintPipeline.enable_attention_slicing
        self.model.enable_attention_slicing()
        # https://huggingface.co/docs/diffusers/v0.7.0/en/optimization/fp16#memory-efficient-attention
        if kwargs.get("enable_xformers", False):
            self.model.enable_xformers_memory_efficient_attention()
        if kwargs.get("cpu_offload", False) and use_gpu:
            logger.info("Enable sequential cpu offload")
            self.model.enable_sequential_cpu_offload(gpu_id=0)
        else:
            self.model = self.model.to(device)
            if kwargs["sd_cpu_textencoder"]:
                logger.info("Run Stable Diffusion TextEncoder on CPU")
                self.model.text_encoder = CPUTextEncoderWrapper(
                    self.model.text_encoder, torch_dtype
                )
        self.callback = kwargs.pop("callback", None)
    def forward(self, image, mask, config: Config):
        """Input image and output image have same size
        image: [H, W, C] RGB
        mask: [H, W, 1] 255 means area to repaint
        return: BGR IMAGE
        """
        scheduler_config = self.model.scheduler.config
        scheduler = get_scheduler(config.sd_sampler, scheduler_config)
        self.model.scheduler = scheduler
        if config.sd_mask_blur != 0:
            # Feather the mask edge; kernel size must be odd.
            k = 2 * config.sd_mask_blur + 1
            mask = cv2.GaussianBlur(mask, (k, k), 0)[:, :, np.newaxis]
        img_h, img_w = image.shape[:2]
        if self.is_native_control_inpaint:
            control_image = make_inpaint_condition(image, mask)
            output = self.model(
                prompt=config.prompt,
                image=control_image,
                height=img_h,
                width=img_w,
                num_inference_steps=config.sd_steps,
                guidance_scale=config.sd_guidance_scale,
                controlnet_conditioning_scale=config.controlnet_conditioning_scale,
                negative_prompt=config.negative_prompt,
                generator=torch.manual_seed(config.sd_seed),
                output_type="np.array",
                callback=self.callback,
            ).images[0]
        else:
            # Build the conditioning image required by the selected method.
            if "canny" in self.sd_controlnet_method:
                canny_image = cv2.Canny(image, 100, 200)
                # Replicate the single edge channel into a 3-channel image.
                canny_image = canny_image[:, :, None]
                canny_image = np.concatenate(
                    [canny_image, canny_image, canny_image], axis=2
                )
                canny_image = PIL.Image.fromarray(canny_image)
                control_image = canny_image
            elif "openpose" in self.sd_controlnet_method:
                from controlnet_aux import OpenposeDetector
                processor = OpenposeDetector.from_pretrained("lllyasviel/ControlNet")
                control_image = processor(image, hand_and_face=True)
            elif "depth" in self.sd_controlnet_method:
                from transformers import pipeline
                depth_estimator = pipeline("depth-estimation")
                depth_image = depth_estimator(PIL.Image.fromarray(image))["depth"]
                depth_image = np.array(depth_image)
                # Replicate the single depth channel into a 3-channel image.
                depth_image = depth_image[:, :, None]
                depth_image = np.concatenate(
                    [depth_image, depth_image, depth_image], axis=2
                )
                control_image = PIL.Image.fromarray(depth_image)
            else:
                raise NotImplementedError(
                    f"{self.sd_controlnet_method} not implemented"
                )
            mask_image = PIL.Image.fromarray(mask[:, :, -1], mode="L")
            image = PIL.Image.fromarray(image)
            output = self.model(
                image=image,
                control_image=control_image,
                prompt=config.prompt,
                negative_prompt=config.negative_prompt,
                mask_image=mask_image,
                num_inference_steps=config.sd_steps,
                guidance_scale=config.sd_guidance_scale,
                output_type="np.array",
                callback=self.callback,
                height=img_h,
                width=img_w,
                generator=torch.manual_seed(config.sd_seed),
                controlnet_conditioning_scale=config.controlnet_conditioning_scale,
            ).images[0]
        # Convert the pipeline's float array output to a uint8 BGR image.
        output = (output * 255).round().astype("uint8")
        output = cv2.cvtColor(output, cv2.COLOR_RGB2BGR)
        return output
    def forward_post_process(self, result, image, mask, config):
        # Optional color correction and a second feathering pass before the
        # final mask blend performed in _pad_forward.
        if config.sd_match_histograms:
            result = self._match_histograms(result, image[:, :, ::-1], mask)
        if config.sd_mask_blur != 0:
            k = 2 * config.sd_mask_blur + 1
            mask = cv2.GaussianBlur(mask, (k, k), 0)
        return result, image, mask
    @staticmethod
    def is_downloaded() -> bool:
        # model will be downloaded when app start, and can't switch in frontend settings
        return True
| 10,883 | 36.531034 | 154 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/model/utils.py | import math
import random
from typing import Any
import torch
import numpy as np
import collections
from itertools import repeat
from diffusers import (
DDIMScheduler,
PNDMScheduler,
LMSDiscreteScheduler,
EulerDiscreteScheduler,
EulerAncestralDiscreteScheduler,
DPMSolverMultistepScheduler,
UniPCMultistepScheduler,
)
from lama_cleaner.schema import SDSampler
from torch import conv2d, conv_transpose2d
def make_beta_schedule(
    device, schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3
):
    """Build a diffusion beta schedule.

    schedule: one of "linear", "cosine", "sqrt_linear", "sqrt".
    Returns a float64 numpy array of length n_timestep.
    Raises ValueError for an unknown schedule name.
    """
    if schedule == "linear":
        # Linear in sqrt-space: interpolate sqrt(beta), then square.
        sqrt_betas = torch.linspace(
            linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64
        )
        betas = sqrt_betas ** 2
    elif schedule == "cosine":
        timesteps = (
            torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s
        ).to(device)
        alphas = timesteps / (1 + cosine_s) * np.pi / 2
        alphas = torch.cos(alphas).pow(2).to(device)
        alphas = alphas / alphas[0]
        betas = 1 - alphas[1:] / alphas[:-1]
        betas = np.clip(betas, a_min=0, a_max=0.999)
    elif schedule == "sqrt_linear":
        betas = torch.linspace(
            linear_start, linear_end, n_timestep, dtype=torch.float64
        )
    elif schedule == "sqrt":
        linear_betas = torch.linspace(
            linear_start, linear_end, n_timestep, dtype=torch.float64
        )
        betas = linear_betas ** 0.5
    else:
        raise ValueError(f"schedule '{schedule}' unknown.")
    return betas.numpy()
def make_ddim_sampling_parameters(alphacums, ddim_timesteps, eta, verbose=True):
    """Compute (sigmas, alphas, alphas_prev) for the DDIM sampler from the
    full alpha-cumprod schedule and the selected timesteps."""
    # Alphas at the chosen DDIM steps; the "previous" sequence is the same
    # selection shifted right by one, seeded with alphacums[0].
    alphas = alphacums[ddim_timesteps]
    alphas_prev = np.asarray([alphacums[0]] + alphacums[ddim_timesteps[:-1]].tolist())
    # Eq. (16) of https://arxiv.org/abs/2010.02502
    sigmas = eta * np.sqrt((1 - alphas_prev) / (1 - alphas) * (1 - alphas / alphas_prev))
    if verbose:
        print(
            f"Selected alphas for ddim sampler: a_t: {alphas}; a_(t-1): {alphas_prev}"
        )
        print(
            f"For the chosen value of eta, which is {eta}, "
            f"this results in the following sigma_t schedule for ddim sampler {sigmas}"
        )
    return sigmas, alphas, alphas_prev
def make_ddim_timesteps(
    ddim_discr_method, num_ddim_timesteps, num_ddpm_timesteps, verbose=True
):
    """Select the DDPM timesteps used by DDIM, via "uniform" or "quad" spacing.
    Raises NotImplementedError for any other discretization name."""
    if ddim_discr_method == "uniform":
        stride = num_ddpm_timesteps // num_ddim_timesteps
        ddim_timesteps = np.asarray(list(range(0, num_ddpm_timesteps, stride)))
    elif ddim_discr_method == "quad":
        ddim_timesteps = (
            (np.linspace(0, np.sqrt(num_ddpm_timesteps * 0.8), num_ddim_timesteps)) ** 2
        ).astype(int)
    else:
        raise NotImplementedError(
            f'There is no ddim discretization method called "{ddim_discr_method}"'
        )
    # add one to get the final alpha values right (the ones from first scale to data during sampling)
    steps_out = ddim_timesteps + 1
    if verbose:
        print(f"Selected timesteps for ddim sampler: {steps_out}")
    return steps_out
def noise_like(shape, device, repeat=False):
    """Sample standard-normal noise of `shape` on `device`.

    With repeat=True a single sample is drawn and broadcast across the
    batch dimension, so every batch element gets identical noise.
    """
    if repeat:
        single = torch.randn((1, *shape[1:]), device=device)
        return single.repeat(shape[0], *((1,) * (len(shape) - 1)))
    return torch.randn(shape, device=device)
def timestep_embedding(device, timesteps, dim, max_period=10000, repeat_only=False):
    """
    Create sinusoidal timestep embeddings.

    :param timesteps: a 1-D Tensor of N indices, one per batch element.
                      These may be fractional.
    :param dim: the dimension of the output.
    :param max_period: controls the minimum frequency of the embeddings.
    :return: an [N x dim] Tensor of positional embeddings.

    NOTE(review): `repeat_only` is accepted but never used here — confirm
    whether the non-sinusoidal branch was intentionally dropped.
    """
    half = dim // 2
    # Geometric frequency ladder from 1 down to 1/max_period.
    exponents = torch.arange(half, dtype=torch.float32) / half
    freqs = torch.exp(-math.log(max_period) * exponents).to(device=device)
    angles = timesteps.float()[:, None] * freqs[None, :]
    embedding = torch.cat((torch.cos(angles), torch.sin(angles)), dim=-1)
    if dim % 2:
        # Pad odd dims with a zero column so the output is exactly `dim` wide.
        pad = torch.zeros_like(embedding[:, :1])
        embedding = torch.cat((embedding, pad), dim=-1)
    return embedding
###### MAT and FcF #######
def normalize_2nd_moment(x, dim=1):
    """Scale `x` so its second moment along `dim` equals 1 (eps-stabilized)."""
    second_moment = x.square().mean(dim=dim, keepdim=True)
    return x * (second_moment + torch.finfo(x.dtype).eps).rsqrt()
class EasyDict(dict):
    """Convenience class that behaves like a dict but allows access with the attribute syntax."""

    def __getattr__(self, name: str) -> Any:
        # Only reached when normal attribute lookup fails; a missing key must
        # surface as AttributeError to honor the attribute protocol.
        if name in self:
            return self[name]
        raise AttributeError(name)

    def __setattr__(self, name: str, value: Any) -> None:
        self[name] = value

    def __delattr__(self, name: str) -> None:
        del self[name]
def _bias_act_ref(x, b=None, dim=1, act="linear", alpha=None, gain=None, clamp=None):
    """Slow reference implementation of `bias_act()` using standard TensorFlow ops."""
    assert isinstance(x, torch.Tensor)
    assert clamp is None or clamp >= 0
    spec = activation_funcs[act]
    # Fall back to the activation's defaults; clamp < 0 means "disabled".
    alpha = float(spec.def_alpha if alpha is None else alpha)
    gain = float(spec.def_gain if gain is None else gain)
    clamp = float(-1 if clamp is None else clamp)

    # Add bias, reshaped so it broadcasts along `dim`.
    if b is not None:
        assert isinstance(b, torch.Tensor) and b.ndim == 1
        assert 0 <= dim < x.ndim
        assert b.shape[0] == x.shape[dim]
        broadcast_shape = [-1 if axis == dim else 1 for axis in range(x.ndim)]
        x = x + b.reshape(broadcast_shape)

    # Activation, then optional gain scaling and symmetric clamping.
    x = spec.func(x, alpha=alpha)
    if gain != 1:
        x = x * gain
    if clamp >= 0:
        x = x.clamp(-clamp, clamp)
    return x
def bias_act(
    x, b=None, dim=1, act="linear", alpha=None, gain=None, clamp=None, impl="ref"
):
    r"""Fused bias and activation function.

    Adds bias `b` (broadcast along `dim`) to `x`, applies the activation
    named by `act`, scales the result by `gain`, and optionally clamps it to
    `[-clamp, +clamp]`. Every step is optional.

    Args:
        x: Input activation tensor; any shape.
        b: 1D bias vector matching `x.shape[dim]`, or `None` to disable.
        dim: Dimension of `x` the bias applies to; ignored when `b` is None.
        act: Activation name from `activation_funcs` (e.g. "relu", "lrelu",
            "tanh", "sigmoid", "swish"); `"linear"` disables the activation.
        alpha: Activation shape parameter, or `None` for the default.
        gain: Output scaling factor, or `None` for the activation's default.
        clamp: Clamp bound, or `None` to disable clamping (default).
        impl: `"ref"` or `"cuda"`; only the reference path exists in this build.

    Returns:
        Tensor of the same shape and datatype as `x`.
    """
    assert isinstance(x, torch.Tensor)
    assert impl in ["ref", "cuda"]
    # Only the slow reference implementation is wired up here.
    return _bias_act_ref(
        x=x, b=b, dim=dim, act=act, alpha=alpha, gain=gain, clamp=clamp
    )
def _get_filter_size(f):
if f is None:
return 1, 1
assert isinstance(f, torch.Tensor) and f.ndim in [1, 2]
fw = f.shape[-1]
fh = f.shape[0]
fw = int(fw)
fh = int(fh)
assert fw >= 1 and fh >= 1
return fw, fh
def _get_weight_shape(w):
shape = [int(sz) for sz in w.shape]
return shape
def _parse_scaling(scaling):
if isinstance(scaling, int):
scaling = [scaling, scaling]
assert isinstance(scaling, (list, tuple))
assert all(isinstance(x, int) for x in scaling)
sx, sy = scaling
assert sx >= 1 and sy >= 1
return sx, sy
def _parse_padding(padding):
if isinstance(padding, int):
padding = [padding, padding]
assert isinstance(padding, (list, tuple))
assert all(isinstance(x, int) for x in padding)
if len(padding) == 2:
padx, pady = padding
padding = [padx, padx, pady, pady]
padx0, padx1, pady0, pady1 = padding
return padx0, padx1, pady0, pady1
def setup_filter(
    f,
    device=torch.device("cpu"),
    normalize=True,
    flip_filter=False,
    gain=1,
    separable=None,
):
    r"""Prepare a 2D FIR filter tensor for use with `upfirdn2d()`.

    Args:
        f: Torch tensor, numpy array, or python list of shape
            `[filter_height, filter_width]` (non-separable),
            `[filter_taps]` (separable), `[]` (impulse), or `None` (identity).
        device: Device of the returned tensor (default: cpu).
        normalize: Rescale so a constant (DC) input keeps its magnitude.
        flip_filter: Flip the filter taps.
        gain: Overall scaling factor for signal magnitude.
        separable: Force a separable / non-separable result; `None` selects
            automatically (1D filters with at least 8 taps stay separable).

    Returns:
        Float32 tensor of shape `[filter_height, filter_width]`
        (non-separable) or `[filter_taps]` (separable).
    """
    # Coerce the input to a non-empty float32 tensor with at least one tap.
    f = torch.as_tensor(1 if f is None else f, dtype=torch.float32)
    assert f.ndim in [0, 1, 2]
    assert f.numel() > 0
    if f.ndim == 0:
        f = f[np.newaxis]

    # Decide separability; a short 1D filter is expanded to its outer product.
    if separable is None:
        separable = f.ndim == 1 and f.numel() >= 8
    if f.ndim == 1 and not separable:
        f = f.ger(f)
    assert f.ndim == (1 if separable else 2)

    # Normalization, tap flipping, gain, and device placement.
    if normalize:
        f /= f.sum()
    if flip_filter:
        f = f.flip(list(range(f.ndim)))
    f = f * (gain ** (f.ndim / 2))
    return f.to(device=device)
def _ntuple(n):
def parse(x):
if isinstance(x, collections.abc.Iterable):
return x
return tuple(repeat(x, n))
return parse
to_2tuple = _ntuple(2)
# Lookup table of activations usable with bias_act() / _bias_act_ref().
# Fields per entry:
#   func: the callable actually applied by the reference path (extra kwargs
#       are swallowed; only "lrelu" consumes `alpha`).
#   def_alpha / def_gain: defaults substituted when bias_act() receives None.
#   cuda_idx / ref / has_2nd_grad: presumably metadata for a fused CUDA
#       kernel that is not present in this build — only `func`, `def_alpha`
#       and `def_gain` are read by the reference implementation here.
activation_funcs = {
    "linear": EasyDict(
        func=lambda x, **_: x,
        def_alpha=0,
        def_gain=1,
        cuda_idx=1,
        ref="",
        has_2nd_grad=False,
    ),
    "relu": EasyDict(
        func=lambda x, **_: torch.nn.functional.relu(x),
        def_alpha=0,
        def_gain=np.sqrt(2),
        cuda_idx=2,
        ref="y",
        has_2nd_grad=False,
    ),
    "lrelu": EasyDict(
        func=lambda x, alpha, **_: torch.nn.functional.leaky_relu(x, alpha),
        def_alpha=0.2,
        def_gain=np.sqrt(2),
        cuda_idx=3,
        ref="y",
        has_2nd_grad=False,
    ),
    "tanh": EasyDict(
        func=lambda x, **_: torch.tanh(x),
        def_alpha=0,
        def_gain=1,
        cuda_idx=4,
        ref="y",
        has_2nd_grad=True,
    ),
    "sigmoid": EasyDict(
        func=lambda x, **_: torch.sigmoid(x),
        def_alpha=0,
        def_gain=1,
        cuda_idx=5,
        ref="y",
        has_2nd_grad=True,
    ),
    "elu": EasyDict(
        func=lambda x, **_: torch.nn.functional.elu(x),
        def_alpha=0,
        def_gain=1,
        cuda_idx=6,
        ref="y",
        has_2nd_grad=True,
    ),
    "selu": EasyDict(
        func=lambda x, **_: torch.nn.functional.selu(x),
        def_alpha=0,
        def_gain=1,
        cuda_idx=7,
        ref="y",
        has_2nd_grad=True,
    ),
    "softplus": EasyDict(
        func=lambda x, **_: torch.nn.functional.softplus(x),
        def_alpha=0,
        def_gain=1,
        cuda_idx=8,
        ref="y",
        has_2nd_grad=True,
    ),
    "swish": EasyDict(
        func=lambda x, **_: torch.sigmoid(x) * x,
        def_alpha=0,
        def_gain=np.sqrt(2),
        cuda_idx=9,
        ref="x",
        has_2nd_grad=True,
    ),
}
def upfirdn2d(x, f, up=1, down=1, padding=0, flip_filter=False, gain=1, impl="cuda"):
    r"""Pad, upsample, FIR-filter, and downsample a batch of 2D images.

    Per channel: (1) upsample by inserting `up - 1` zeros after each pixel,
    (2) apply `padding` relative to the upsampled image (negative values
    crop), (3) convolve with the 2D FIR filter `f`, keeping only output
    pixels whose footprint lies inside the input, and (4) keep every
    `down`-th pixel. This mirrors scipy.signal.upfirdn().

    Args:
        x: `[batch_size, num_channels, in_height, in_width]` float tensor.
        f: FIR filter: `[filter_height, filter_width]`, separable
            `[filter_taps]`, or `None` (identity).
        up: Integer upsampling factor, or `[x, y]` pair (default: 1).
        down: Integer downsampling factor, or `[x, y]` pair (default: 1).
        padding: int, `[x, y]`, or `[x_before, x_after, y_before, y_after]`
            (default: 0).
        flip_filter: False = convolution, True = correlation.
        gain: Overall scaling factor for signal magnitude (default: 1).
        impl: Accepted for API compatibility; only the slow reference
            implementation is wired up in this build.

    Returns:
        Tensor of the shape `[batch_size, num_channels, out_height, out_width]`.
    """
    return _upfirdn2d_ref(
        x, f, up=up, down=down, padding=padding, flip_filter=flip_filter, gain=gain
    )
def _upfirdn2d_ref(x, f, up=1, down=1, padding=0, flip_filter=False, gain=1):
"""Slow reference implementation of `upfirdn2d()` using standard PyTorch ops."""
# Validate arguments.
assert isinstance(x, torch.Tensor) and x.ndim == 4
if f is None:
f = torch.ones([1, 1], dtype=torch.float32, device=x.device)
assert isinstance(f, torch.Tensor) and f.ndim in [1, 2]
assert not f.requires_grad
batch_size, num_channels, in_height, in_width = x.shape
# upx, upy = _parse_scaling(up)
# downx, downy = _parse_scaling(down)
upx, upy = up, up
downx, downy = down, down
# padx0, padx1, pady0, pady1 = _parse_padding(padding)
padx0, padx1, pady0, pady1 = padding[0], padding[1], padding[2], padding[3]
# Upsample by inserting zeros.
x = x.reshape([batch_size, num_channels, in_height, 1, in_width, 1])
x = torch.nn.functional.pad(x, [0, upx - 1, 0, 0, 0, upy - 1])
x = x.reshape([batch_size, num_channels, in_height * upy, in_width * upx])
# Pad or crop.
x = torch.nn.functional.pad(
x, [max(padx0, 0), max(padx1, 0), max(pady0, 0), max(pady1, 0)]
)
x = x[
:,
:,
max(-pady0, 0) : x.shape[2] - max(-pady1, 0),
max(-padx0, 0) : x.shape[3] - max(-padx1, 0),
]
# Setup filter.
f = f * (gain ** (f.ndim / 2))
f = f.to(x.dtype)
if not flip_filter:
f = f.flip(list(range(f.ndim)))
# Convolve with the filter.
f = f[np.newaxis, np.newaxis].repeat([num_channels, 1] + [1] * f.ndim)
if f.ndim == 4:
x = conv2d(input=x, weight=f, groups=num_channels)
else:
x = conv2d(input=x, weight=f.unsqueeze(2), groups=num_channels)
x = conv2d(input=x, weight=f.unsqueeze(3), groups=num_channels)
# Downsample by throwing away pixels.
x = x[:, :, ::downy, ::downx]
return x
def downsample2d(x, f, down=2, padding=0, flip_filter=False, gain=1, impl="cuda"):
    r"""Downsample a batch of 2D images using the given 2D FIR filter.

    By default, the result is padded so that its shape is a fraction of the
    input. User-specified padding is applied on top of that, with negative
    values indicating cropping. Pixels outside the image are assumed zero.

    Args:
        x: `[batch_size, num_channels, in_height, in_width]` float tensor.
        f: FIR filter (`[fh, fw]`, separable `[taps]`, or `None` = identity).
        down: Integer downsampling factor, or `[x, y]` pair (default: 2).
        padding: Padding with respect to the input; a single number,
            `[x, y]`, or `[x_before, x_after, y_before, y_after]` (default: 0).
        flip_filter: False = convolution, True = correlation.
        gain: Overall scaling factor for signal magnitude (default: 1).
        impl: Implementation name, forwarded to `upfirdn2d()`.

    Returns:
        Tensor of the shape `[batch_size, num_channels, out_height, out_width]`.
    """
    downx, downy = _parse_scaling(down)
    # Accept int, [x, y], or [x0, x1, y0, y1] padding as documented
    # (previously only a plain int worked; a list crashed on `padding + int`).
    if isinstance(padding, int):
        padx0 = padx1 = pady0 = pady1 = padding
    elif len(padding) == 2:
        padx0, padx1 = padding[0], padding[0]
        pady0, pady1 = padding[1], padding[1]
    else:
        padx0, padx1, pady0, pady1 = padding
    fw, fh = _get_filter_size(f)
    # Center the filter footprint with respect to the downsampled grid.
    p = [
        padx0 + (fw - downx + 1) // 2,
        padx1 + (fw - downx) // 2,
        pady0 + (fh - downy + 1) // 2,
        pady1 + (fh - downy) // 2,
    ]
    return upfirdn2d(
        x, f, down=down, padding=p, flip_filter=flip_filter, gain=gain, impl=impl
    )
def upsample2d(x, f, up=2, padding=0, flip_filter=False, gain=1, impl="cuda"):
    r"""Upsample a batch of 2D images using the given 2D FIR filter.

    By default, the result is padded so that its shape is a multiple of the
    input. User-specified padding is applied on top of that, with negative
    values indicating cropping. Pixels outside the image are assumed zero.

    Args:
        x: `[batch_size, num_channels, in_height, in_width]` float tensor.
        f: FIR filter (`[fh, fw]`, separable `[taps]`, or `None` = identity).
        up: Integer upsampling factor, or `[x, y]` pair (default: 2).
        padding: Padding with respect to the output; a single number,
            `[x, y]`, or `[x_before, x_after, y_before, y_after]` (default: 0).
        flip_filter: False = convolution, True = correlation.
        gain: Overall scaling factor for signal magnitude (default: 1).
        impl: Implementation name, forwarded to `upfirdn2d()`.

    Returns:
        Tensor of the shape `[batch_size, num_channels, out_height, out_width]`.
    """
    upx, upy = _parse_scaling(up)
    padx0, padx1, pady0, pady1 = _parse_padding(padding)
    fw, fh = _get_filter_size(f)
    # Center the filter footprint on the upsampled grid.
    pad_spec = [
        padx0 + (fw + upx - 1) // 2,
        padx1 + (fw - upx) // 2,
        pady0 + (fh + upy - 1) // 2,
        pady1 + (fh - upy) // 2,
    ]
    # gain * upx * upy compensates for the energy spread over inserted zeros.
    return upfirdn2d(
        x,
        f,
        up=up,
        padding=pad_spec,
        flip_filter=flip_filter,
        gain=gain * upx * upy,
        impl=impl,
    )
class MinibatchStdLayer(torch.nn.Module):
def __init__(self, group_size, num_channels=1):
super().__init__()
self.group_size = group_size
self.num_channels = num_channels
def forward(self, x):
N, C, H, W = x.shape
G = (
torch.min(torch.as_tensor(self.group_size), torch.as_tensor(N))
if self.group_size is not None
else N
)
F = self.num_channels
c = C // F
y = x.reshape(
G, -1, F, c, H, W
) # [GnFcHW] Split minibatch N into n groups of size G, and channels C into F groups of size c.
y = y - y.mean(dim=0) # [GnFcHW] Subtract mean over group.
y = y.square().mean(dim=0) # [nFcHW] Calc variance over group.
y = (y + 1e-8).sqrt() # [nFcHW] Calc stddev over group.
y = y.mean(dim=[2, 3, 4]) # [nF] Take average over channels and pixels.
y = y.reshape(-1, F, 1, 1) # [nF11] Add missing dimensions.
y = y.repeat(G, 1, H, W) # [NFHW] Replicate over group and pixels.
x = torch.cat([x, y], dim=1) # [NCHW] Append to input as new channels.
return x
class FullyConnectedLayer(torch.nn.Module):
    """Linear layer whose weights are rescaled at run time by
    `lr_multiplier / sqrt(in_features)` instead of being scaled at init."""

    def __init__(
        self,
        in_features,  # Number of input features.
        out_features,  # Number of output features.
        bias=True,  # Apply additive bias before the activation function?
        activation="linear",  # Activation function: 'relu', 'lrelu', etc.
        lr_multiplier=1,  # Learning rate multiplier.
        bias_init=0,  # Initial value for the additive bias.
    ):
        super().__init__()
        self.weight = torch.nn.Parameter(
            torch.randn([out_features, in_features]) / lr_multiplier
        )
        if bias:
            init = torch.full([out_features], np.float32(bias_init))
            self.bias = torch.nn.Parameter(init)
        else:
            self.bias = None
        self.activation = activation
        self.weight_gain = lr_multiplier / np.sqrt(in_features)
        self.bias_gain = lr_multiplier

    def forward(self, x):
        w = self.weight * self.weight_gain
        b = self.bias
        if b is not None and self.bias_gain != 1:
            b = b * self.bias_gain
        x = x.matmul(w.t())
        if self.activation == "linear" and b is not None:
            # Plain affine path: broadcast the bias over all leading dims.
            out = x + b.reshape([-1 if i == x.ndim - 1 else 1 for i in range(x.ndim)])
        else:
            out = bias_act(x, b, act=self.activation, dim=x.ndim - 1)
        return out
def _conv2d_wrapper(
    x, w, stride=1, padding=0, groups=1, transpose=False, flip_weight=True
):
    """Wrapper for the underlying `conv2d()` and `conv_transpose2d()` implementations."""
    out_channels, in_channels_per_group, kh, kw = _get_weight_shape(w)

    # conv2d() actually performs correlation (flip_weight=True), so true
    # convolution semantics (flip_weight=False) require flipping the taps.
    if not flip_weight:
        w = w.flip([2, 3])

    # Workaround for a performance pitfall in cuDNN 8.0.5, triggered by a
    # 1x1 kernel + memory_format=channels_last + fewer than 64 channels.
    is_plain_1x1 = (
        kw == 1
        and kh == 1
        and stride == 1
        and padding in [0, [0, 0], (0, 0)]
        and not transpose
    )
    if (
        is_plain_1x1
        and x.stride()[1] == 1
        and min(out_channels, in_channels_per_group) < 64
    ):
        if out_channels <= 4 and groups == 1:
            # Tiny output: express the 1x1 convolution as a matmul.
            in_shape = x.shape
            x = w.squeeze(3).squeeze(2) @ x.reshape(
                [in_shape[0], in_channels_per_group, -1]
            )
            x = x.reshape([in_shape[0], out_channels, in_shape[2], in_shape[3]])
        else:
            # Run the conv in contiguous layout, then restore channels_last.
            x = x.to(memory_format=torch.contiguous_format)
            w = w.to(memory_format=torch.contiguous_format)
            x = conv2d(x, w, groups=groups)
        return x.to(memory_format=torch.channels_last)

    # General case.
    op = conv_transpose2d if transpose else conv2d
    return op(x, w, stride=stride, padding=padding, groups=groups)
def conv2d_resample(
    x, w, f=None, up=1, down=1, padding=0, groups=1, flip_weight=True, flip_filter=False
):
    r"""2D convolution with optional up/downsampling.

    Padding is performed only once at the beginning, not between the operations.

    Args:
        x: Input tensor of shape
            `[batch_size, in_channels, in_height, in_width]`.
        w: Weight tensor of shape
            `[out_channels, in_channels//groups, kernel_height, kernel_width]`.
        f: Low-pass filter for up/downsampling. Must be prepared beforehand by
            calling setup_filter(). None = identity (default).
        up: Integer upsampling factor (default: 1).
        down: Integer downsampling factor (default: 1).
        padding: Padding with respect to the upsampled image. Can be a single number
            or a list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]`
            (default: 0).
        groups: Split input channels into N groups (default: 1).
        flip_weight: False = convolution, True = correlation (default: True).
        flip_filter: False = convolution, True = correlation (default: False).

    Returns:
        Tensor of the shape `[batch_size, num_channels, out_height, out_width]`.
    """
    # Validate arguments.
    assert isinstance(x, torch.Tensor) and (x.ndim == 4)
    assert isinstance(w, torch.Tensor) and (w.ndim == 4) and (w.dtype == x.dtype)
    assert f is None or (isinstance(f, torch.Tensor) and f.ndim in [1, 2])
    assert isinstance(up, int) and (up >= 1)
    assert isinstance(down, int) and (down >= 1)
    out_channels, in_channels_per_group, kh, kw = _get_weight_shape(w)
    fw, fh = _get_filter_size(f)

    # Accept int, [x, y], or [x0, x1, y0, y1] padding as documented
    # (previously only a plain int worked; a list crashed on `padding + int`).
    if isinstance(padding, int):
        px0 = px1 = py0 = py1 = padding
    elif len(padding) == 2:
        px0, px1 = padding[0], padding[0]
        py0, py1 = padding[1], padding[1]
    else:
        px0, px1, py0, py1 = padding

    # Adjust padding to account for up/downsampling.
    if up > 1:
        px0 += (fw + up - 1) // 2
        px1 += (fw - up) // 2
        py0 += (fh + up - 1) // 2
        py1 += (fh - up) // 2
    if down > 1:
        px0 += (fw - down + 1) // 2
        px1 += (fw - down) // 2
        py0 += (fh - down + 1) // 2
        py1 += (fh - down) // 2

    # Fast path: 1x1 convolution with downsampling only => downsample first, then convolve.
    if kw == 1 and kh == 1 and (down > 1 and up == 1):
        x = upfirdn2d(
            x=x, f=f, down=down, padding=[px0, px1, py0, py1], flip_filter=flip_filter
        )
        x = _conv2d_wrapper(x=x, w=w, groups=groups, flip_weight=flip_weight)
        return x

    # Fast path: 1x1 convolution with upsampling only => convolve first, then upsample.
    if kw == 1 and kh == 1 and (up > 1 and down == 1):
        x = _conv2d_wrapper(x=x, w=w, groups=groups, flip_weight=flip_weight)
        x = upfirdn2d(
            x=x,
            f=f,
            up=up,
            padding=[px0, px1, py0, py1],
            gain=up ** 2,
            flip_filter=flip_filter,
        )
        return x

    # Fast path: downsampling only => use strided convolution.
    if down > 1 and up == 1:
        x = upfirdn2d(x=x, f=f, padding=[px0, px1, py0, py1], flip_filter=flip_filter)
        x = _conv2d_wrapper(
            x=x, w=w, stride=down, groups=groups, flip_weight=flip_weight
        )
        return x

    # Fast path: upsampling with optional downsampling => use transpose strided convolution.
    if up > 1:
        if groups == 1:
            w = w.transpose(0, 1)
        else:
            # conv_transpose2d expects [in, out//groups, kh, kw]; swap the
            # channel axes within each group.
            w = w.reshape(groups, out_channels // groups, in_channels_per_group, kh, kw)
            w = w.transpose(1, 2)
            w = w.reshape(
                groups * in_channels_per_group, out_channels // groups, kh, kw
            )
        # Shift the padding budget onto the transpose conv where possible
        # (pxt/pyt must be non-negative); the remainder is applied by the
        # trailing upfirdn2d call below.
        px0 -= kw - 1
        px1 -= kw - up
        py0 -= kh - 1
        py1 -= kh - up
        pxt = max(min(-px0, -px1), 0)
        pyt = max(min(-py0, -py1), 0)
        x = _conv2d_wrapper(
            x=x,
            w=w,
            stride=up,
            padding=[pyt, pxt],
            groups=groups,
            transpose=True,
            flip_weight=(not flip_weight),
        )
        x = upfirdn2d(
            x=x,
            f=f,
            padding=[px0 + pxt, px1 + pxt, py0 + pyt, py1 + pyt],
            gain=up ** 2,
            flip_filter=flip_filter,
        )
        if down > 1:
            x = upfirdn2d(x=x, f=f, down=down, flip_filter=flip_filter)
        return x

    # Fast path: no up/downsampling, padding supported by the underlying implementation => use plain conv2d.
    if up == 1 and down == 1:
        if px0 == px1 and py0 == py1 and px0 >= 0 and py0 >= 0:
            return _conv2d_wrapper(
                x=x, w=w, padding=[py0, px0], groups=groups, flip_weight=flip_weight
            )

    # Fallback: Generic reference implementation.
    x = upfirdn2d(
        x=x,
        f=(f if up > 1 else None),
        up=up,
        padding=[px0, px1, py0, py1],
        gain=up ** 2,
        flip_filter=flip_filter,
    )
    x = _conv2d_wrapper(x=x, w=w, groups=groups, flip_weight=flip_weight)
    if down > 1:
        x = upfirdn2d(x=x, f=f, down=down, flip_filter=flip_filter)
    return x
class Conv2dLayer(torch.nn.Module):
    """2D convolution layer with optional up/downsampling, runtime weight
    scaling (1/sqrt(fan_in)), and fused bias + activation via bias_act()."""

    def __init__(
        self,
        in_channels,  # Number of input channels.
        out_channels,  # Number of output channels.
        kernel_size,  # Width and height of the convolution kernel.
        bias=True,  # Apply additive bias before the activation function?
        activation="linear",  # Activation function: 'relu', 'lrelu', etc.
        up=1,  # Integer upsampling factor.
        down=1,  # Integer downsampling factor.
        resample_filter=(1, 3, 3, 1),  # Low-pass filter to apply when
        # resampling activations. Tuple (not list) to avoid a shared
        # mutable default argument; setup_filter() accepts either.
        conv_clamp=None,  # Clamp the output to +-X, None = disable clamping.
        channels_last=False,  # Expect the input to have memory_format=channels_last?
        trainable=True,  # Update the weights of this layer during training?
    ):
        super().__init__()
        self.activation = activation
        self.up = up
        self.down = down
        self.register_buffer("resample_filter", setup_filter(resample_filter))
        self.conv_clamp = conv_clamp
        self.padding = kernel_size // 2
        # Equalized runtime scaling: weights are sampled at unit variance and
        # rescaled by 1/sqrt(fan_in) on every forward pass.
        self.weight_gain = 1 / np.sqrt(in_channels * (kernel_size ** 2))
        self.act_gain = activation_funcs[activation].def_gain

        memory_format = (
            torch.channels_last if channels_last else torch.contiguous_format
        )
        weight = torch.randn([out_channels, in_channels, kernel_size, kernel_size]).to(
            memory_format=memory_format
        )
        bias_tensor = torch.zeros([out_channels]) if bias else None
        if trainable:
            self.weight = torch.nn.Parameter(weight)
            self.bias = (
                torch.nn.Parameter(bias_tensor) if bias_tensor is not None else None
            )
        else:
            # Frozen layers keep their tensors as buffers so they follow the
            # module across devices but receive no gradient updates.
            self.register_buffer("weight", weight)
            if bias_tensor is not None:
                self.register_buffer("bias", bias_tensor)
            else:
                self.bias = None

    def forward(self, x, gain=1):
        """Convolve (with resampling), then apply fused bias/activation.

        Args:
            x: `[batch, in_channels, H, W]` input tensor.
            gain: Extra scaling applied to both the activation gain and clamp.
        """
        w = self.weight * self.weight_gain
        x = conv2d_resample(
            x=x,
            w=w,
            f=self.resample_filter,
            up=self.up,
            down=self.down,
            padding=self.padding,
        )
        act_gain = self.act_gain * gain
        act_clamp = self.conv_clamp * gain if self.conv_clamp is not None else None
        return bias_act(
            x, self.bias, act=self.activation, gain=act_gain, clamp=act_clamp
        )
def torch_gc():
    """Release cached CUDA memory and collect IPC handles; no-op without CUDA."""
    if not torch.cuda.is_available():
        return
    torch.cuda.empty_cache()
    torch.cuda.ipc_collect()
def set_seed(seed: int):
    """Seed every RNG the app uses: python, numpy, torch CPU and all CUDA devices."""
    seeders = (
        random.seed,
        np.random.seed,
        torch.manual_seed,
        torch.cuda.manual_seed_all,
    )
    for seeder in seeders:
        seeder(seed)
def get_scheduler(sd_sampler, scheduler_config):
    """Instantiate the diffusers scheduler matching `sd_sampler`.

    Raises:
        ValueError: if `sd_sampler` is not a recognized sampler.
    """
    scheduler_classes = {
        SDSampler.ddim: DDIMScheduler,
        SDSampler.pndm: PNDMScheduler,
        SDSampler.k_lms: LMSDiscreteScheduler,
        SDSampler.k_euler: EulerDiscreteScheduler,
        SDSampler.k_euler_a: EulerAncestralDiscreteScheduler,
        SDSampler.dpm_plus_plus: DPMSolverMultistepScheduler,
        SDSampler.uni_pc: UniPCMultistepScheduler,
    }
    if sd_sampler not in scheduler_classes:
        raise ValueError(sd_sampler)
    return scheduler_classes[sd_sampler].from_config(scheduler_config)
| 33,811 | 34.893843 | 148 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/model/zits.py | import os
import time
import cv2
import torch
import torch.nn.functional as F
from lama_cleaner.helper import get_cache_path_by_url, load_jit_model
from lama_cleaner.schema import Config
import numpy as np
from lama_cleaner.model.base import InpaintModel
# Download URLs and md5 checksums for the four TorchScript models that make
# up the ZITS pipeline (inpaint, edge-line, structure-upsample, wireframe).
# Each value can be overridden through an environment variable of the same name.
ZITS_INPAINT_MODEL_URL = os.environ.get(
    "ZITS_INPAINT_MODEL_URL",
    "https://github.com/Sanster/models/releases/download/add_zits/zits-inpaint-0717.pt",
)
ZITS_INPAINT_MODEL_MD5 = os.environ.get(
    "ZITS_INPAINT_MODEL_MD5", "9978cc7157dc29699e42308d675b2154"
)
ZITS_EDGE_LINE_MODEL_URL = os.environ.get(
    "ZITS_EDGE_LINE_MODEL_URL",
    "https://github.com/Sanster/models/releases/download/add_zits/zits-edge-line-0717.pt",
)
ZITS_EDGE_LINE_MODEL_MD5 = os.environ.get(
    "ZITS_EDGE_LINE_MODEL_MD5", "55e31af21ba96bbf0c80603c76ea8c5f"
)
ZITS_STRUCTURE_UPSAMPLE_MODEL_URL = os.environ.get(
    "ZITS_STRUCTURE_UPSAMPLE_MODEL_URL",
    "https://github.com/Sanster/models/releases/download/add_zits/zits-structure-upsample-0717.pt",
)
ZITS_STRUCTURE_UPSAMPLE_MODEL_MD5 = os.environ.get(
    "ZITS_STRUCTURE_UPSAMPLE_MODEL_MD5", "3d88a07211bd41b2ec8cc0d999f29927"
)
ZITS_WIRE_FRAME_MODEL_URL = os.environ.get(
    "ZITS_WIRE_FRAME_MODEL_URL",
    "https://github.com/Sanster/models/releases/download/add_zits/zits-wireframe-0717.pt",
)
ZITS_WIRE_FRAME_MODEL_MD5 = os.environ.get(
    "ZITS_WIRE_FRAME_MODEL_MD5", "a9727c63a8b48b65c905d351b21ce46b"
)
def resize(img, height, width, center_crop=False):
    """Resize `img` to (height, width), optionally center-cropping to square first.

    Args:
        img: HW[C] numpy image.
        height: Target height in pixels.
        width: Target width in pixels.
        center_crop: Crop the largest centered square before resizing.

    Returns:
        The resized image.
    """
    imgh, imgw = img.shape[0:2]

    if center_crop and imgh != imgw:
        # Crop the largest centered square.
        side = np.minimum(imgh, imgw)
        j = (imgh - side) // 2
        i = (imgw - side) // 2
        img = img[j : j + side, i : i + side, ...]

    # INTER_AREA avoids aliasing when shrinking; INTER_LINEAR when enlarging.
    if imgh > height and imgw > width:
        inter = cv2.INTER_AREA
    else:
        inter = cv2.INTER_LINEAR
    # cv2.resize takes dsize as (width, height); the previous code passed
    # (height, width), which only worked because all callers use squares.
    img = cv2.resize(img, (width, height), interpolation=inter)
    return img
def to_tensor(img, scale=True, norm=False):
    """Convert an HW[C] numpy image to a CHW float tensor.

    Args:
        img: numpy image, HW (grayscale) or HWC.
        scale: Divide by 255 to map values into [0, 1].
        norm: Additionally normalize with per-channel mean/std of 0.5
            (maps [0, 1] to [-1, 1]).

    Returns:
        Float tensor of shape [C, H, W].
    """
    if img.ndim == 2:
        img = img[:, :, np.newaxis]
    c = img.shape[-1]

    img_t = torch.from_numpy(img).permute(2, 0, 1).float()
    if scale:
        img_t = img_t.div(255)

    if norm:
        # Build the constants for however many channels the image has; the
        # previous hard-coded 3-element tensors crashed on grayscale (c == 1).
        mean = torch.full((c, 1, 1), 0.5)
        std = torch.full((c, 1, 1), 0.5)
        img_t = (img_t - mean) / std
    return img_t
def load_masked_position_encoding(mask):
    """Build ZITS positional encodings for the masked region.

    The mask is first resized to 256x256, then the hole is flood-filled from
    the known (unmasked) region with repeated 3x3 dilations. Each masked
    pixel records the dilation step at which it was first reached (its
    distance to the hole border) and, per direction kernel, whether the fill
    reached it from that side.

    Args:
        mask: [H, W] uint8 array; 255 marks the masked region.

    Returns:
        rel_pos: [H, W] int32, border distance scaled and clipped to
            [0, pos_num - 1]; zeroed outside the mask.
        abs_pos: [256, 256] int32 raw dilation-step indices.
        direct: [H, W, 4] int32 per-direction arrival flags.
    """
    ones_filter = np.ones((3, 3), dtype=np.float32)
    # Four asymmetric kernels; each one dilates from a different corner
    # quadrant — presumably encoding the direction the fill arrived from.
    d_filter1 = np.array([[1, 1, 0], [1, 1, 0], [0, 0, 0]], dtype=np.float32)
    d_filter2 = np.array([[0, 0, 0], [1, 1, 0], [1, 1, 0]], dtype=np.float32)
    d_filter3 = np.array([[0, 1, 1], [0, 1, 1], [0, 0, 0]], dtype=np.float32)
    d_filter4 = np.array([[0, 0, 0], [0, 1, 1], [0, 1, 1]], dtype=np.float32)
    str_size = 256  # working resolution for the flood fill
    pos_num = 128  # number of quantization bins for rel_pos
    ori_mask = mask.copy()
    ori_h, ori_w = ori_mask.shape[0:2]
    ori_mask = ori_mask / 255
    mask = cv2.resize(mask, (str_size, str_size), interpolation=cv2.INTER_AREA)
    mask[mask > 0] = 255
    h, w = mask.shape[0:2]
    mask3 = mask.copy()
    # mask3: 1.0 on known pixels, 0.0 inside the hole.
    mask3 = 1.0 - (mask3 / 255.0)
    pos = np.zeros((h, w), dtype=np.int32)
    direct = np.zeros((h, w, 4), dtype=np.int32)
    i = 0
    while np.sum(1 - mask3) > 0:
        i += 1
        # Grow the known region by one 3x3 dilation step.
        mask3_ = cv2.filter2D(mask3, -1, ones_filter)
        mask3_[mask3_ > 0] = 1
        # Pixels reached for the first time get distance i.
        sub_mask = mask3_ - mask3
        pos[sub_mask == 1] = i
        # Repeat the dilation with each directional kernel and flag the
        # newly reached pixels in the corresponding channel.
        m = cv2.filter2D(mask3, -1, d_filter1)
        m[m > 0] = 1
        m = m - mask3
        direct[m == 1, 0] = 1
        m = cv2.filter2D(mask3, -1, d_filter2)
        m[m > 0] = 1
        m = m - mask3
        direct[m == 1, 1] = 1
        m = cv2.filter2D(mask3, -1, d_filter3)
        m[m > 0] = 1
        m = m - mask3
        direct[m == 1, 2] = 1
        m = cv2.filter2D(mask3, -1, d_filter4)
        m[m > 0] = 1
        m = m - mask3
        direct[m == 1, 3] = 1
        mask3 = mask3_
    abs_pos = pos.copy()
    rel_pos = pos / (str_size / 2)  # to 0~1 maybe larger than 1
    rel_pos = (rel_pos * pos_num).astype(np.int32)
    rel_pos = np.clip(rel_pos, 0, pos_num - 1)
    if ori_w != w or ori_h != h:
        # Map the encodings back to the input resolution; unmasked pixels
        # are zeroed so only the hole carries positional information.
        rel_pos = cv2.resize(rel_pos, (ori_w, ori_h), interpolation=cv2.INTER_NEAREST)
        rel_pos[ori_mask == 0] = 0
        direct = cv2.resize(direct, (ori_w, ori_h), interpolation=cv2.INTER_NEAREST)
        direct[ori_mask == 0, :] = 0
    return rel_pos, abs_pos, direct
def load_image(img, mask, device, sigma256=3.0):
    """
    Prepare the ZITS input batch from an image and an inpainting mask.

    Args:
        img: [H, W, C] RGB image.
        mask: [H, W] uint8 mask; 255 marks the region to inpaint.
        device: torch device all result tensors are moved to.
        sigma256: Gaussian blur sigma for the OpenCV Canny fallback at
            256x256 resolution.

    Returns:
        dict with image/mask tensors at full, 512 and 256 resolution, the
        256x256 Canny edge map, the masked positional encodings
        (rel_pos / abs_pos / direct), and the original height/width.
    """
    h, w, _ = img.shape
    imgh, imgw = img.shape[0:2]
    img_256 = resize(img, 256, 256)
    # Binarize the mask and prepare 256/512 variants.
    mask = (mask > 127).astype(np.uint8) * 255
    mask_256 = cv2.resize(mask, (256, 256), interpolation=cv2.INTER_AREA)
    mask_256[mask_256 > 0] = 255
    mask_512 = cv2.resize(mask, (512, 512), interpolation=cv2.INTER_AREA)
    mask_512[mask_512 > 0] = 255
    # original skimage implemention
    # https://scikit-image.org/docs/stable/api/skimage.feature.html#skimage.feature.canny
    # low_threshold: Lower bound for hysteresis thresholding (linking edges). If None, low_threshold is set to 10% of dtype’s max.
    # high_threshold: Upper bound for hysteresis thresholding (linking edges). If None, high_threshold is set to 20% of dtype’s max.
    try:
        import skimage

        gray_256 = skimage.color.rgb2gray(img_256)
        edge_256 = skimage.feature.canny(gray_256, sigma=3.0, mask=None).astype(float)
        # cv2.imwrite("skimage_gray.jpg", (gray_256*255).astype(np.uint8))
        # cv2.imwrite("skimage_edge.jpg", (edge_256*255).astype(np.uint8))
    except:
        # OpenCV fallback approximating skimage's defaults: blur with the
        # given sigma, then 10% / 20% hysteresis thresholds.
        gray_256 = cv2.cvtColor(img_256, cv2.COLOR_RGB2GRAY)
        gray_256_blured = cv2.GaussianBlur(gray_256, ksize=(7, 7), sigmaX=sigma256, sigmaY=sigma256)
        edge_256 = cv2.Canny(gray_256_blured, threshold1=int(255*0.1), threshold2=int(255*0.2))
        # cv2.imwrite("opencv_edge.jpg", edge_256)
    # line
    img_512 = resize(img, 512, 512)
    rel_pos, abs_pos, direct = load_masked_position_encoding(mask)
    # Assemble the batch; every tensor gets a leading batch dimension.
    batch = dict()
    batch["images"] = to_tensor(img.copy()).unsqueeze(0).to(device)
    batch["img_256"] = to_tensor(img_256, norm=True).unsqueeze(0).to(device)
    batch["masks"] = to_tensor(mask).unsqueeze(0).to(device)
    batch["mask_256"] = to_tensor(mask_256).unsqueeze(0).to(device)
    batch["mask_512"] = to_tensor(mask_512).unsqueeze(0).to(device)
    batch["edge_256"] = to_tensor(edge_256, scale=False).unsqueeze(0).to(device)
    batch["img_512"] = to_tensor(img_512).unsqueeze(0).to(device)
    batch["rel_pos"] = torch.LongTensor(rel_pos).unsqueeze(0).to(device)
    batch["abs_pos"] = torch.LongTensor(abs_pos).unsqueeze(0).to(device)
    batch["direct"] = torch.LongTensor(direct).unsqueeze(0).to(device)
    batch["h"] = imgh
    batch["w"] = imgw
    return batch
def to_device(data, device):
    """Move tensors in `data` (a tensor, dict, or list) to `device`.

    Dicts are updated in place (only tensor values are moved); lists are
    rebuilt recursively. Any other type is returned unchanged — previously
    such inputs fell off the end of the function and became `None`.
    """
    if isinstance(data, torch.Tensor):
        return data.to(device)
    if isinstance(data, dict):
        for key in data:
            if isinstance(data[key], torch.Tensor):
                data[key] = data[key].to(device)
        return data
    if isinstance(data, list):
        return [to_device(d, device) for d in data]
    return data
class ZITS(InpaintModel):
    # Registration name used to select this model.
    name = "zits"
    # Size/padding constraints — presumably consumed by the InpaintModel
    # base class (defined elsewhere): inputs padded to a square whose sides
    # are a multiple of 32 and at least 256. TODO confirm against InpaintModel.
    min_size = 256
    pad_mod = 32
    pad_to_square = True
    def __init__(self, device, **kwargs):
        """
        Args:
            device: torch device the TorchScript models are loaded onto.
            **kwargs: accepted for interface compatibility; unused here.
        """
        # NOTE(review): the base-class __init__ presumably triggers
        # init_model(), so attributes assigned after super().__init__ are
        # not available during model loading — confirm against InpaintModel.
        super().__init__(device)
        self.device = device
        # Number of iterative refinement passes in sample_edge_line_logits().
        self.sample_edge_line_iterations = 1
def init_model(self, device, **kwargs):
self.wireframe = load_jit_model(ZITS_WIRE_FRAME_MODEL_URL, device, ZITS_WIRE_FRAME_MODEL_MD5)
self.edge_line = load_jit_model(ZITS_EDGE_LINE_MODEL_URL, device, ZITS_EDGE_LINE_MODEL_MD5)
self.structure_upsample = load_jit_model(
ZITS_STRUCTURE_UPSAMPLE_MODEL_URL, device, ZITS_STRUCTURE_UPSAMPLE_MODEL_MD5
)
self.inpaint = load_jit_model(ZITS_INPAINT_MODEL_URL, device, ZITS_INPAINT_MODEL_MD5)
@staticmethod
def is_downloaded() -> bool:
model_paths = [
get_cache_path_by_url(ZITS_WIRE_FRAME_MODEL_URL),
get_cache_path_by_url(ZITS_EDGE_LINE_MODEL_URL),
get_cache_path_by_url(ZITS_STRUCTURE_UPSAMPLE_MODEL_URL),
get_cache_path_by_url(ZITS_INPAINT_MODEL_URL),
]
return all([os.path.exists(it) for it in model_paths])
def wireframe_edge_and_line(self, items, enable: bool):
# 最终向 items 中添加 edge 和 line key
if not enable:
items["edge"] = torch.zeros_like(items["masks"])
items["line"] = torch.zeros_like(items["masks"])
return
start = time.time()
try:
line_256 = self.wireframe_forward(
items["img_512"],
h=256,
w=256,
masks=items["mask_512"],
mask_th=0.85,
)
except:
line_256 = torch.zeros_like(items["mask_256"])
print(f"wireframe_forward time: {(time.time() - start) * 1000:.2f}ms")
# np_line = (line[0][0].numpy() * 255).astype(np.uint8)
# cv2.imwrite("line.jpg", np_line)
start = time.time()
edge_pred, line_pred = self.sample_edge_line_logits(
context=[items["img_256"], items["edge_256"], line_256],
mask=items["mask_256"].clone(),
iterations=self.sample_edge_line_iterations,
add_v=0.05,
mul_v=4,
)
print(f"sample_edge_line_logits time: {(time.time() - start) * 1000:.2f}ms")
# np_edge_pred = (edge_pred[0][0].numpy() * 255).astype(np.uint8)
# cv2.imwrite("edge_pred.jpg", np_edge_pred)
# np_line_pred = (line_pred[0][0].numpy() * 255).astype(np.uint8)
# cv2.imwrite("line_pred.jpg", np_line_pred)
# exit()
input_size = min(items["h"], items["w"])
if input_size != 256 and input_size > 256:
while edge_pred.shape[2] < input_size:
edge_pred = self.structure_upsample(edge_pred)
edge_pred = torch.sigmoid((edge_pred + 2) * 2)
line_pred = self.structure_upsample(line_pred)
line_pred = torch.sigmoid((line_pred + 2) * 2)
edge_pred = F.interpolate(
edge_pred,
size=(input_size, input_size),
mode="bilinear",
align_corners=False,
)
line_pred = F.interpolate(
line_pred,
size=(input_size, input_size),
mode="bilinear",
align_corners=False,
)
# np_edge_pred = (edge_pred[0][0].numpy() * 255).astype(np.uint8)
# cv2.imwrite("edge_pred_upsample.jpg", np_edge_pred)
# np_line_pred = (line_pred[0][0].numpy() * 255).astype(np.uint8)
# cv2.imwrite("line_pred_upsample.jpg", np_line_pred)
# exit()
items["edge"] = edge_pred.detach()
items["line"] = line_pred.detach()
@torch.no_grad()
def forward(self, image, mask, config: Config):
"""Input images and output images have same size
images: [H, W, C] RGB
masks: [H, W]
return: BGR IMAGE
"""
mask = mask[:, :, 0]
items = load_image(image, mask, device=self.device)
self.wireframe_edge_and_line(items, config.zits_wireframe)
inpainted_image = self.inpaint(
items["images"],
items["masks"],
items["edge"],
items["line"],
items["rel_pos"],
items["direct"],
)
inpainted_image = inpainted_image * 255.0
inpainted_image = (
inpainted_image.cpu().permute(0, 2, 3, 1)[0].numpy().astype(np.uint8)
)
inpainted_image = inpainted_image[:, :, ::-1]
# cv2.imwrite("inpainted.jpg", inpainted_image)
# exit()
return inpainted_image
def wireframe_forward(self, images, h, w, masks, mask_th=0.925):
lcnn_mean = torch.tensor([109.730, 103.832, 98.681]).reshape(1, 3, 1, 1)
lcnn_std = torch.tensor([22.275, 22.124, 23.229]).reshape(1, 3, 1, 1)
images = images * 255.0
# the masks value of lcnn is 127.5
masked_images = images * (1 - masks) + torch.ones_like(images) * masks * 127.5
masked_images = (masked_images - lcnn_mean) / lcnn_std
def to_int(x):
return tuple(map(int, x))
lines_tensor = []
lmap = np.zeros((h, w))
output_masked = self.wireframe(masked_images)
output_masked = to_device(output_masked, "cpu")
if output_masked["num_proposals"] == 0:
lines_masked = []
scores_masked = []
else:
lines_masked = output_masked["lines_pred"].numpy()
lines_masked = [
[line[1] * h, line[0] * w, line[3] * h, line[2] * w]
for line in lines_masked
]
scores_masked = output_masked["lines_score"].numpy()
for line, score in zip(lines_masked, scores_masked):
if score > mask_th:
try:
import skimage
rr, cc, value = skimage.draw.line_aa(
*to_int(line[0:2]), *to_int(line[2:4])
)
lmap[rr, cc] = np.maximum(lmap[rr, cc], value)
except:
cv2.line(lmap, to_int(line[0:2][::-1]), to_int(line[2:4][::-1]), (1, 1, 1), 1, cv2.LINE_AA)
lmap = np.clip(lmap * 255, 0, 255).astype(np.uint8)
lines_tensor.append(to_tensor(lmap).unsqueeze(0))
lines_tensor = torch.cat(lines_tensor, dim=0)
return lines_tensor.detach().to(self.device)
def sample_edge_line_logits(
self, context, mask=None, iterations=1, add_v=0, mul_v=4
):
[img, edge, line] = context
img = img * (1 - mask)
edge = edge * (1 - mask)
line = line * (1 - mask)
for i in range(iterations):
edge_logits, line_logits = self.edge_line(img, edge, line, masks=mask)
edge_pred = torch.sigmoid(edge_logits)
line_pred = torch.sigmoid((line_logits + add_v) * mul_v)
edge = edge + edge_pred * mask
edge[edge >= 0.25] = 1
edge[edge < 0.25] = 0
line = line + line_pred * mask
b, _, h, w = edge_pred.shape
edge_pred = edge_pred.reshape(b, -1, 1)
line_pred = line_pred.reshape(b, -1, 1)
mask = mask.reshape(b, -1)
edge_probs = torch.cat([1 - edge_pred, edge_pred], dim=-1)
line_probs = torch.cat([1 - line_pred, line_pred], dim=-1)
edge_probs[:, :, 1] += 0.5
line_probs[:, :, 1] += 0.5
edge_max_probs = edge_probs.max(dim=-1)[0] + (1 - mask) * (-100)
line_max_probs = line_probs.max(dim=-1)[0] + (1 - mask) * (-100)
indices = torch.sort(
edge_max_probs + line_max_probs, dim=-1, descending=True
)[1]
for ii in range(b):
keep = int((i + 1) / iterations * torch.sum(mask[ii, ...]))
assert torch.sum(mask[ii][indices[ii, :keep]]) == keep, "Error!!!"
mask[ii][indices[ii, :keep]] = 0
mask = mask.reshape(b, 1, h, w)
edge = edge * (1 - mask)
line = line * (1 - mask)
edge, line = edge.to(torch.float32), line.to(torch.float32)
return edge, line
| 15,613 | 33.852679 | 132 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/model/ddim_sampler.py | import torch
import numpy as np
from tqdm import tqdm
from lama_cleaner.model.utils import make_ddim_timesteps, make_ddim_sampling_parameters, noise_like
from loguru import logger
class DDIMSampler(object):
    """DDIM sampler (Denoising Diffusion Implicit Models).

    Drives a wrapped latent-diffusion model's reverse process over a
    shortened timestep schedule; with eta=0 (as used by ``sample``) the
    process is deterministic given the initial noise.
    """

    def __init__(self, model, schedule="linear"):
        super().__init__()
        self.model = model
        self.ddpm_num_timesteps = model.num_timesteps
        self.schedule = schedule

    def register_buffer(self, name, attr):
        # Not an nn.Module: "buffers" are just plain attributes here.
        setattr(self, name, attr)

    def make_schedule(
        self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0.0, verbose=True
    ):
        """Precompute the DDIM timestep subset and all alpha/sigma tables."""
        self.ddim_timesteps = make_ddim_timesteps(
            ddim_discr_method=ddim_discretize,
            num_ddim_timesteps=ddim_num_steps,
            # array([1])
            num_ddpm_timesteps=self.ddpm_num_timesteps,
            verbose=verbose,
        )
        alphas_cumprod = self.model.alphas_cumprod  # torch.Size([1000])
        assert (
            alphas_cumprod.shape[0] == self.ddpm_num_timesteps
        ), "alphas have to be defined for each timestep"
        to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)

        self.register_buffer("betas", to_torch(self.model.betas))
        self.register_buffer("alphas_cumprod", to_torch(alphas_cumprod))
        self.register_buffer(
            "alphas_cumprod_prev", to_torch(self.model.alphas_cumprod_prev)
        )

        # calculations for diffusion q(x_t | x_{t-1}) and others
        self.register_buffer(
            "sqrt_alphas_cumprod", to_torch(np.sqrt(alphas_cumprod.cpu()))
        )
        self.register_buffer(
            "sqrt_one_minus_alphas_cumprod",
            to_torch(np.sqrt(1.0 - alphas_cumprod.cpu())),
        )
        self.register_buffer(
            "log_one_minus_alphas_cumprod", to_torch(np.log(1.0 - alphas_cumprod.cpu()))
        )
        self.register_buffer(
            "sqrt_recip_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod.cpu()))
        )
        self.register_buffer(
            "sqrt_recipm1_alphas_cumprod",
            to_torch(np.sqrt(1.0 / alphas_cumprod.cpu() - 1)),
        )

        # ddim sampling parameters
        ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(
            alphacums=alphas_cumprod.cpu(),
            ddim_timesteps=self.ddim_timesteps,
            eta=ddim_eta,
            verbose=verbose,
        )
        self.register_buffer("ddim_sigmas", ddim_sigmas)
        self.register_buffer("ddim_alphas", ddim_alphas)
        self.register_buffer("ddim_alphas_prev", ddim_alphas_prev)
        self.register_buffer("ddim_sqrt_one_minus_alphas", np.sqrt(1.0 - ddim_alphas))
        # Sigmas expressed on the full DDPM step grid (eta-scaled).
        sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(
            (1 - self.alphas_cumprod_prev)
            / (1 - self.alphas_cumprod)
            * (1 - self.alphas_cumprod / self.alphas_cumprod_prev)
        )
        self.register_buffer(
            "ddim_sigmas_for_original_num_steps", sigmas_for_original_sampling_steps
        )

    @torch.no_grad()
    def sample(self, steps, conditioning, batch_size, shape):
        """Sample ``batch_size`` latents of ``shape`` (C, H, W) in ``steps``
        deterministic DDIM steps (eta=0) under ``conditioning``."""
        self.make_schedule(ddim_num_steps=steps, ddim_eta=0, verbose=False)
        # sampling
        C, H, W = shape
        size = (batch_size, C, H, W)

        # samples: 1,3,128,128
        return self.ddim_sampling(
            conditioning,
            size,
            quantize_denoised=False,
            ddim_use_original_steps=False,
            noise_dropout=0,
            temperature=1.0,
        )

    @torch.no_grad()
    def ddim_sampling(
        self,
        cond,
        shape,
        ddim_use_original_steps=False,
        quantize_denoised=False,
        temperature=1.0,
        noise_dropout=0.0,
    ):
        """Run the reverse process from pure noise down to x_0's latent."""
        device = self.model.betas.device
        b = shape[0]
        img = torch.randn(shape, device=device, dtype=cond.dtype)
        timesteps = (
            self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps
        )

        # Iterate timesteps from high (noisy) to low (clean).
        time_range = (
            reversed(range(0, timesteps))
            if ddim_use_original_steps
            else np.flip(timesteps)
        )
        total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]
        logger.info(f"Running DDIM Sampling with {total_steps} timesteps")

        iterator = tqdm(time_range, desc="DDIM Sampler", total=total_steps)

        for i, step in enumerate(iterator):
            index = total_steps - i - 1
            ts = torch.full((b,), step, device=device, dtype=torch.long)

            outs = self.p_sample_ddim(
                img,
                cond,
                ts,
                index=index,
                use_original_steps=ddim_use_original_steps,
                quantize_denoised=quantize_denoised,
                temperature=temperature,
                noise_dropout=noise_dropout,
            )
            img, _ = outs

        return img

    @torch.no_grad()
    def p_sample_ddim(
        self,
        x,
        c,
        t,
        index,
        repeat_noise=False,
        use_original_steps=False,
        quantize_denoised=False,
        temperature=1.0,
        noise_dropout=0.0,
    ):
        """One DDIM update x_t -> x_{t-1}; returns (x_prev, pred_x0)."""
        b, *_, device = *x.shape, x.device
        e_t = self.model.apply_model(x, t, c)

        alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas
        alphas_prev = (
            self.model.alphas_cumprod_prev
            if use_original_steps
            else self.ddim_alphas_prev
        )
        sqrt_one_minus_alphas = (
            self.model.sqrt_one_minus_alphas_cumprod
            if use_original_steps
            else self.ddim_sqrt_one_minus_alphas
        )
        sigmas = (
            self.model.ddim_sigmas_for_original_num_steps
            if use_original_steps
            else self.ddim_sigmas
        )
        # select parameters corresponding to the currently considered timestep
        a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)
        a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)
        sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)
        sqrt_one_minus_at = torch.full(
            (b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device
        )

        # current prediction for x_0
        pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()
        if quantize_denoised:  # unused: sample() always passes False
            pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)
        # direction pointing to x_t
        dir_xt = (1.0 - a_prev - sigma_t ** 2).sqrt() * e_t
        noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature
        if noise_dropout > 0.0:  # unused: sample() always passes 0
            noise = torch.nn.functional.dropout(noise, p=noise_dropout)
        x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise
        return x_prev, pred_x0
| 6,873 | 34.43299 | 99 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/model/instruct_pix2pix.py | import PIL.Image
import cv2
import torch
from loguru import logger
from lama_cleaner.model.base import DiffusionInpaintModel
from lama_cleaner.model.utils import set_seed
from lama_cleaner.schema import Config
class InstructPix2Pix(DiffusionInpaintModel):
    """Wrapper around diffusers' InstructPix2Pix pipeline.

    Text-instruction-guided whole-image editing; note that the ``mask``
    argument of ``forward`` is not used by this model.
    """

    name = "instruct_pix2pix"
    pad_mod = 8
    min_size = 512

    def init_model(self, device: torch.device, **kwargs):
        """Build the pipeline; honors no_half / disable_nsfw / cpu_offload /
        enable_xformers / local_files_only flags from ``kwargs``."""
        from diffusers import StableDiffusionInstructPix2PixPipeline
        fp16 = not kwargs.get('no_half', False)

        model_kwargs = {"local_files_only": kwargs.get('local_files_only', False)}
        # NSFW checker must also be dropped for cpu_offload (it breaks offload).
        if kwargs['disable_nsfw'] or kwargs.get('cpu_offload', False):
            logger.info("Disable Stable Diffusion Model NSFW checker")
            model_kwargs.update(dict(
                safety_checker=None,
                feature_extractor=None,
                requires_safety_checker=False
            ))

        use_gpu = device == torch.device('cuda') and torch.cuda.is_available()
        torch_dtype = torch.float16 if use_gpu and fp16 else torch.float32
        self.model = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix",
            revision="fp16" if use_gpu and fp16 else "main",
            torch_dtype=torch_dtype,
            **model_kwargs
        )

        self.model.enable_attention_slicing()
        if kwargs.get('enable_xformers', False):
            self.model.enable_xformers_memory_efficient_attention()

        if kwargs.get('cpu_offload', False) and use_gpu:
            logger.info("Enable sequential cpu offload")
            self.model.enable_sequential_cpu_offload(gpu_id=0)
        else:
            self.model = self.model.to(device)

    def forward(self, image, mask, config: Config):
        """Input image and output image have same size
        image: [H, W, C] RGB
        mask: [H, W, 1] 255 means area to repaint
        return: BGR IMAGE
        edit = pipe(prompt, image=image, num_inference_steps=20, image_guidance_scale=1.5, guidance_scale=7).images[0]
        """
        output = self.model(
            image=PIL.Image.fromarray(image),
            prompt=config.prompt,
            negative_prompt=config.negative_prompt,
            num_inference_steps=config.p2p_steps,
            image_guidance_scale=config.p2p_image_guidance_scale,
            guidance_scale=config.p2p_guidance_scale,
            output_type="np.array",
            generator=torch.manual_seed(config.sd_seed)
        ).images[0]
        # Pipeline returns floats in [0, 1]; convert to uint8 BGR.
        output = (output * 255).round().astype("uint8")
        output = cv2.cvtColor(output, cv2.COLOR_RGB2BGR)
        return output

    #
    # def forward_post_process(self, result, image, mask, config):
    #     if config.sd_match_histograms:
    #         result = self._match_histograms(result, image[:, :, ::-1], mask)
    #
    #     if config.sd_mask_blur != 0:
    #         k = 2 * config.sd_mask_blur + 1
    #         mask = cv2.GaussianBlur(mask, (k, k), 0)
    #     return result, image, mask

    @staticmethod
    def is_downloaded() -> bool:
        # model will be downloaded when app start, and can't switch in frontend settings
        return True
| 3,175 | 36.809524 | 118 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/model/mat.py | import os
import random
import cv2
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
from lama_cleaner.helper import load_model, get_cache_path_by_url, norm_img
from lama_cleaner.model.base import InpaintModel
from lama_cleaner.model.utils import (
setup_filter,
Conv2dLayer,
FullyConnectedLayer,
conv2d_resample,
bias_act,
upsample2d,
activation_funcs,
MinibatchStdLayer,
to_2tuple,
normalize_2nd_moment,
set_seed,
)
from lama_cleaner.schema import Config
class ModulatedConv2d(nn.Module):
    """StyleGAN2-style modulated convolution.

    Per-sample input-channel scales are derived from a style code via an
    affine layer; weights are optionally demodulated and the batch is run
    as a single grouped convolution.
    """

    def __init__(
        self,
        in_channels,  # Number of input channels.
        out_channels,  # Number of output channels.
        kernel_size,  # Width and height of the convolution kernel.
        style_dim,  # dimension of the style code
        demodulate=True,  # perfrom demodulation
        up=1,  # Integer upsampling factor.
        down=1,  # Integer downsampling factor.
        resample_filter=[
            1,
            3,
            3,
            1,
        ],  # Low-pass filter to apply when resampling activations.
        conv_clamp=None,  # Clamp the output to +-X, None = disable clamping.
    ):
        super().__init__()
        self.demodulate = demodulate

        # Leading singleton batch dim lets the weight broadcast per sample.
        self.weight = torch.nn.Parameter(
            torch.randn([1, out_channels, in_channels, kernel_size, kernel_size])
        )
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        # Equalized learning-rate scaling.
        self.weight_gain = 1 / np.sqrt(in_channels * (kernel_size ** 2))
        self.padding = self.kernel_size // 2
        self.up = up
        self.down = down
        self.register_buffer("resample_filter", setup_filter(resample_filter))
        self.conv_clamp = conv_clamp

        # Maps the style code to per-input-channel modulation scales.
        self.affine = FullyConnectedLayer(style_dim, in_channels, bias_init=1)

    def forward(self, x, style):
        batch, in_channels, height, width = x.shape
        style = self.affine(style).view(batch, 1, in_channels, 1, 1)
        # Modulate: scale each input channel of the weight per sample.
        weight = self.weight * self.weight_gain * style

        if self.demodulate:
            # Normalize each output filter to unit norm (demodulation).
            decoefs = (weight.pow(2).sum(dim=[2, 3, 4]) + 1e-8).rsqrt()
            weight = weight * decoefs.view(batch, self.out_channels, 1, 1, 1)

        # Fold the batch into the channel dim and use a grouped convolution
        # so every sample gets its own modulated weights in one call.
        weight = weight.view(
            batch * self.out_channels, in_channels, self.kernel_size, self.kernel_size
        )
        x = x.view(1, batch * in_channels, height, width)
        x = conv2d_resample(
            x=x,
            w=weight,
            f=self.resample_filter,
            up=self.up,
            down=self.down,
            padding=self.padding,
            groups=batch,
        )
        out = x.view(batch, self.out_channels, *x.shape[2:])

        return out
class StyleConv(torch.nn.Module):
    """Modulated convolution followed by optional noise injection and a
    biased activation (StyleGAN2 synthesis-layer style)."""

    def __init__(
        self,
        in_channels,  # Number of input channels.
        out_channels,  # Number of output channels.
        style_dim,  # Intermediate latent (W) dimensionality.
        resolution,  # Resolution of this layer.
        kernel_size=3,  # Convolution kernel size.
        up=1,  # Integer upsampling factor.
        use_noise=False,  # Enable noise input?
        activation="lrelu",  # Activation function: 'relu', 'lrelu', etc.
        resample_filter=[
            1,
            3,
            3,
            1,
        ],  # Low-pass filter to apply when resampling activations.
        conv_clamp=None,  # Clamp the output of convolution layers to +-X, None = disable clamping.
        demodulate=True,  # perform demodulation
    ):
        super().__init__()

        self.conv = ModulatedConv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            style_dim=style_dim,
            demodulate=demodulate,
            up=up,
            resample_filter=resample_filter,
            conv_clamp=conv_clamp,
        )

        self.use_noise = use_noise
        self.resolution = resolution
        if use_noise:
            # Fixed noise image for noise_mode="const"; learned per-layer strength.
            self.register_buffer("noise_const", torch.randn([resolution, resolution]))
            self.noise_strength = torch.nn.Parameter(torch.zeros([]))

        self.bias = torch.nn.Parameter(torch.zeros([out_channels]))
        self.activation = activation
        self.act_gain = activation_funcs[activation].def_gain
        self.conv_clamp = conv_clamp

    def forward(self, x, style, noise_mode="random", gain=1):
        """Apply modulated conv, optional noise, then biased activation.

        Args:
            x: input feature map [N, C_in, H, W].
            style: style code [N, style_dim].
            noise_mode: "random", "const" or "none".
            gain: extra multiplier folded into the activation gain/clamp.
        """
        x = self.conv(x, style)

        assert noise_mode in ["random", "const", "none"]

        # Fix: noise_mode="none" with use_noise=True previously raised
        # NameError because `noise` was never assigned before `x = x + noise`.
        if self.use_noise and noise_mode != "none":
            if noise_mode == "random":
                xh, xw = x.size()[-2:]
                noise = (
                    torch.randn([x.shape[0], 1, xh, xw], device=x.device)
                    * self.noise_strength
                )
            else:  # "const"
                noise = self.noise_const * self.noise_strength
            x = x + noise

        act_gain = self.act_gain * gain
        act_clamp = self.conv_clamp * gain if self.conv_clamp is not None else None
        out = bias_act(
            x, self.bias, act=self.activation, gain=act_gain, clamp=act_clamp
        )

        return out
class ToRGB(torch.nn.Module):
    """Project features to image channels with a style-modulated 1x1 conv.

    Optionally accumulates a lower-resolution ``skip`` image (upsampled when
    the shapes differ), as in StyleGAN2 skip-generator architectures.
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        style_dim,
        kernel_size=1,
        resample_filter=[1, 3, 3, 1],
        conv_clamp=None,
        demodulate=False,
    ):
        super().__init__()
        self.conv = ModulatedConv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            style_dim=style_dim,
            demodulate=demodulate,
            resample_filter=resample_filter,
            conv_clamp=conv_clamp,
        )
        self.bias = torch.nn.Parameter(torch.zeros([out_channels]))
        self.register_buffer("resample_filter", setup_filter(resample_filter))
        self.conv_clamp = conv_clamp

    def forward(self, x, style, skip=None):
        """Return the projected image, added to ``skip`` when given."""
        features = self.conv(x, style)
        image = bias_act(features, self.bias, clamp=self.conv_clamp)

        if skip is None:
            return image

        # Bring the incoming skip image up to this resolution if needed.
        if skip.shape != image.shape:
            skip = upsample2d(skip, self.resample_filter)
        return image + skip
def get_style_code(a, b):
    """Concatenate two style/latent codes along the channel dimension (dim 1)."""
    return torch.cat((a, b), dim=1)
class DecBlockFirst(nn.Module):
    """First decoder block: expands a flat latent to a 4x4 feature map via an
    FC layer, adds the matching encoder feature, and emits the first RGB
    image. Style codes combine per-layer ws with the global code gs."""

    def __init__(
        self,
        in_channels,
        out_channels,
        activation,
        style_dim,
        use_noise,
        demodulate,
        img_channels,
    ):
        super().__init__()
        self.fc = FullyConnectedLayer(
            in_features=in_channels * 2,
            out_features=in_channels * 4 ** 2,
            activation=activation,
        )
        self.conv = StyleConv(
            in_channels=in_channels,
            out_channels=out_channels,
            style_dim=style_dim,
            resolution=4,
            kernel_size=3,
            use_noise=use_noise,
            activation=activation,
            demodulate=demodulate,
        )
        self.toRGB = ToRGB(
            in_channels=out_channels,
            out_channels=img_channels,
            style_dim=style_dim,
            kernel_size=1,
            demodulate=False,
        )

    def forward(self, x, ws, gs, E_features, noise_mode="random"):
        """Returns (features, rgb_image) at 4x4 resolution."""
        # Reshape the FC output into a 4x4 spatial map.
        x = self.fc(x).view(x.shape[0], -1, 4, 4)
        # Skip connection from the encoder at this resolution level.
        x = x + E_features[2]
        style = get_style_code(ws[:, 0], gs)
        x = self.conv(x, style, noise_mode=noise_mode)
        style = get_style_code(ws[:, 1], gs)
        img = self.toRGB(x, style, skip=None)

        return x, img
class DecBlockFirstV2(nn.Module):
    """First decoder block, v2: like DecBlockFirst but takes a spatial input
    and replaces the FC expansion with a plain 3x3 convolution."""

    def __init__(
        self,
        in_channels,
        out_channels,
        activation,
        style_dim,
        use_noise,
        demodulate,
        img_channels,
    ):
        super().__init__()
        self.conv0 = Conv2dLayer(
            in_channels=in_channels,
            out_channels=in_channels,
            kernel_size=3,
            activation=activation,
        )
        self.conv1 = StyleConv(
            in_channels=in_channels,
            out_channels=out_channels,
            style_dim=style_dim,
            resolution=4,
            kernel_size=3,
            use_noise=use_noise,
            activation=activation,
            demodulate=demodulate,
        )
        self.toRGB = ToRGB(
            in_channels=out_channels,
            out_channels=img_channels,
            style_dim=style_dim,
            kernel_size=1,
            demodulate=False,
        )

    def forward(self, x, ws, gs, E_features, noise_mode="random"):
        """Returns (features, rgb_image) at the base resolution."""
        # x = self.fc(x).view(x.shape[0], -1, 4, 4)
        x = self.conv0(x)
        # Skip connection from the encoder at this resolution level.
        x = x + E_features[2]
        style = get_style_code(ws[:, 0], gs)
        x = self.conv1(x, style, noise_mode=noise_mode)
        style = get_style_code(ws[:, 1], gs)
        img = self.toRGB(x, style, skip=None)

        return x, img
class DecBlock(nn.Module):
    """Decoder block at resolution 2**res: 2x-upsampling StyleConv, an
    encoder skip connection, a second StyleConv, and an RGB output that is
    accumulated onto the incoming image."""

    def __init__(
        self,
        res,
        in_channels,
        out_channels,
        activation,
        style_dim,
        use_noise,
        demodulate,
        img_channels,
    ):  # res = 2, ..., resolution_log2
        super().__init__()
        self.res = res

        self.conv0 = StyleConv(
            in_channels=in_channels,
            out_channels=out_channels,
            style_dim=style_dim,
            resolution=2 ** res,
            kernel_size=3,
            up=2,
            use_noise=use_noise,
            activation=activation,
            demodulate=demodulate,
        )
        self.conv1 = StyleConv(
            in_channels=out_channels,
            out_channels=out_channels,
            style_dim=style_dim,
            resolution=2 ** res,
            kernel_size=3,
            use_noise=use_noise,
            activation=activation,
            demodulate=demodulate,
        )
        self.toRGB = ToRGB(
            in_channels=out_channels,
            out_channels=img_channels,
            style_dim=style_dim,
            kernel_size=1,
            demodulate=False,
        )

    def forward(self, x, img, ws, gs, E_features, noise_mode="random"):
        """Returns (features, accumulated_rgb) at resolution 2**res."""
        # Three consecutive ws slots per block, offset by resolution level.
        style = get_style_code(ws[:, self.res * 2 - 5], gs)
        x = self.conv0(x, style, noise_mode=noise_mode)
        x = x + E_features[self.res]
        style = get_style_code(ws[:, self.res * 2 - 4], gs)
        x = self.conv1(x, style, noise_mode=noise_mode)
        style = get_style_code(ws[:, self.res * 2 - 3], gs)
        img = self.toRGB(x, style, skip=img)

        return x, img
class MappingNet(torch.nn.Module):
    """StyleGAN2 mapping network: maps latent z (and optional label c) to the
    intermediate latent w, optionally broadcast to num_ws copies and
    truncated toward the tracked average w."""

    def __init__(
        self,
        z_dim,  # Input latent (Z) dimensionality, 0 = no latent.
        c_dim,  # Conditioning label (C) dimensionality, 0 = no label.
        w_dim,  # Intermediate latent (W) dimensionality.
        num_ws,  # Number of intermediate latents to output, None = do not broadcast.
        num_layers=8,  # Number of mapping layers.
        embed_features=None,  # Label embedding dimensionality, None = same as w_dim.
        layer_features=None,  # Number of intermediate features in the mapping layers, None = same as w_dim.
        activation="lrelu",  # Activation function: 'relu', 'lrelu', etc.
        lr_multiplier=0.01,  # Learning rate multiplier for the mapping layers.
        w_avg_beta=0.995,  # Decay for tracking the moving average of W during training, None = do not track.
        torch_dtype=torch.float32,
    ):
        super().__init__()
        self.z_dim = z_dim
        self.c_dim = c_dim
        self.w_dim = w_dim
        self.num_ws = num_ws
        self.num_layers = num_layers
        self.w_avg_beta = w_avg_beta
        self.torch_dtype = torch_dtype

        if embed_features is None:
            embed_features = w_dim
        if c_dim == 0:
            embed_features = 0
        if layer_features is None:
            layer_features = w_dim
        # Layer widths: [z(+embed), hidden..., w_dim].
        features_list = (
            [z_dim + embed_features] + [layer_features] * (num_layers - 1) + [w_dim]
        )

        if c_dim > 0:
            self.embed = FullyConnectedLayer(c_dim, embed_features)
        for idx in range(num_layers):
            in_features = features_list[idx]
            out_features = features_list[idx + 1]
            layer = FullyConnectedLayer(
                in_features,
                out_features,
                activation=activation,
                lr_multiplier=lr_multiplier,
            )
            setattr(self, f"fc{idx}", layer)

        if num_ws is not None and w_avg_beta is not None:
            self.register_buffer("w_avg", torch.zeros([w_dim]))

    def forward(
        self, z, c, truncation_psi=1, truncation_cutoff=None, skip_w_avg_update=False
    ):
        """Map (z, c) -> w; broadcast to [N, num_ws, w_dim] when configured."""
        # Embed, normalize, and concat inputs.
        x = None
        if self.z_dim > 0:
            x = normalize_2nd_moment(z)
        if self.c_dim > 0:
            y = normalize_2nd_moment(self.embed(c))
            x = torch.cat([x, y], dim=1) if x is not None else y

        # Main layers.
        for idx in range(self.num_layers):
            layer = getattr(self, f"fc{idx}")
            x = layer(x)

        # Update moving average of W.
        if self.w_avg_beta is not None and self.training and not skip_w_avg_update:
            self.w_avg.copy_(x.detach().mean(dim=0).lerp(self.w_avg, self.w_avg_beta))

        # Broadcast.
        if self.num_ws is not None:
            x = x.unsqueeze(1).repeat([1, self.num_ws, 1])

        # Apply truncation.
        if truncation_psi != 1:
            assert self.w_avg_beta is not None
            if self.num_ws is None or truncation_cutoff is None:
                x = self.w_avg.lerp(x, truncation_psi)
            else:
                # Truncate only the first `truncation_cutoff` ws entries.
                x[:, :truncation_cutoff] = self.w_avg.lerp(
                    x[:, :truncation_cutoff], truncation_psi
                )

        return x
class DisFromRGB(nn.Module):
    """Discriminator input stem: a 1x1 convolution that lifts the raw
    image(+mask) channels to the first feature width."""

    def __init__(
        self, in_channels, out_channels, activation
    ):  # res = 2, ..., resolution_log2
        super().__init__()
        self.conv = Conv2dLayer(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=1,
            activation=activation,
        )

    def forward(self, x):
        """Project the input through the 1x1 convolution."""
        return self.conv(x)
class DisBlock(nn.Module):
    """Discriminator residual block: a 3x3 conv, a downsampling (2x) 3x3
    conv, and a 1x1 strided skip path; both branches carry gain 1/sqrt(2)
    so the sum keeps unit variance."""

    def __init__(
        self, in_channels, out_channels, activation
    ):  # res = 2, ..., resolution_log2
        super().__init__()
        self.conv0 = Conv2dLayer(
            in_channels=in_channels,
            out_channels=in_channels,
            kernel_size=3,
            activation=activation,
        )
        self.conv1 = Conv2dLayer(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=3,
            down=2,
            activation=activation,
        )
        self.skip = Conv2dLayer(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=1,
            down=2,
            bias=False,
        )

    def forward(self, x):
        """Residual forward pass with variance-preserving gains."""
        branch_gain = np.sqrt(0.5)
        shortcut = self.skip(x, gain=branch_gain)
        main = self.conv1(self.conv0(x), gain=branch_gain)
        return shortcut + main
class Discriminator(torch.nn.Module):
    """MAT discriminator: a pyramid of residual DisBlocks over the image
    concatenated with its mask, a minibatch-stddev layer, and FC heads;
    optionally projected onto a mapped conditioning label."""

    def __init__(
        self,
        c_dim,  # Conditioning label (C) dimensionality.
        img_resolution,  # Input resolution.
        img_channels,  # Number of input color channels.
        channel_base=32768,  # Overall multiplier for the number of channels.
        channel_max=512,  # Maximum number of channels in any layer.
        channel_decay=1,
        cmap_dim=None,  # Dimensionality of mapped conditioning label, None = default.
        activation="lrelu",
        mbstd_group_size=4,  # Group size for the minibatch standard deviation layer, None = entire minibatch.
        mbstd_num_channels=1,  # Number of features for the minibatch standard deviation layer, 0 = disable.
    ):
        super().__init__()
        self.c_dim = c_dim
        self.img_resolution = img_resolution
        self.img_channels = img_channels

        resolution_log2 = int(np.log2(img_resolution))
        # Resolution must be a power of two, >= 4.
        assert img_resolution == 2 ** resolution_log2 and img_resolution >= 4
        self.resolution_log2 = resolution_log2

        def nf(stage):
            # Channel count at a given resolution stage (StyleGAN2 formula).
            return np.clip(
                int(channel_base / 2 ** (stage * channel_decay)), 1, channel_max
            )

        if cmap_dim == None:
            cmap_dim = nf(2)
        if c_dim == 0:
            cmap_dim = 0
        self.cmap_dim = cmap_dim

        if c_dim > 0:
            self.mapping = MappingNet(
                z_dim=0, c_dim=c_dim, w_dim=cmap_dim, num_ws=None, w_avg_beta=None
            )

        # +1 input channel for the concatenated mask.
        Dis = [DisFromRGB(img_channels + 1, nf(resolution_log2), activation)]
        for res in range(resolution_log2, 2, -1):
            Dis.append(DisBlock(nf(res), nf(res - 1), activation))

        if mbstd_num_channels > 0:
            Dis.append(
                MinibatchStdLayer(
                    group_size=mbstd_group_size, num_channels=mbstd_num_channels
                )
            )
        Dis.append(
            Conv2dLayer(
                nf(2) + mbstd_num_channels, nf(2), kernel_size=3, activation=activation
            )
        )
        self.Dis = nn.Sequential(*Dis)

        # Final 4x4 feature map is flattened into the FC heads.
        self.fc0 = FullyConnectedLayer(nf(2) * 4 ** 2, nf(2), activation=activation)
        self.fc1 = FullyConnectedLayer(nf(2), 1 if cmap_dim == 0 else cmap_dim)

    def forward(self, images_in, masks_in, c):
        """Score (images_in, masks_in) pairs; c is the optional label."""
        # Mask is shifted to [-0.5, 0.5] before concatenation.
        x = torch.cat([masks_in - 0.5, images_in], dim=1)
        x = self.Dis(x)
        x = self.fc1(self.fc0(x.flatten(start_dim=1)))

        if self.c_dim > 0:
            cmap = self.mapping(None, c)

        if self.cmap_dim > 0:
            # Projection discriminator: dot product with the mapped label.
            x = (x * cmap).sum(dim=1, keepdim=True) * (1 / np.sqrt(self.cmap_dim))

        return x
def nf(stage, channel_base=32768, channel_decay=1.0, channel_max=512):
    """Number of feature channels at a given resolution stage.

    ``stage`` is log2 of the resolution (e.g. stage 9 -> 512px). Known
    stages use the precomputed table below; any other stage falls back to
    the StyleGAN2 channel formula
    ``clip(channel_base / 2**(stage * channel_decay), 1, channel_max)``,
    which reproduces the table values exactly. Previously the channel
    parameters were ignored and unknown stages raised ``KeyError``.
    """
    NF = {512: 64, 256: 128, 128: 256, 64: 512, 32: 512, 16: 512, 8: 512, 4: 512}
    res = 2 ** stage
    if res in NF:
        return NF[res]
    # Generalized fallback; matches the formula used by Discriminator.nf.
    return int(np.clip(int(channel_base / 2 ** (stage * channel_decay)), 1, channel_max))
class Mlp(nn.Module):
    """Two-layer MLP used inside the transformer blocks.

    The first projection applies an lrelu activation (via
    FullyConnectedLayer); the second is linear. ``act_layer`` and ``drop``
    are accepted for interface compatibility but are not used by the
    underlying layers.
    """

    def __init__(
        self,
        in_features,
        hidden_features=None,
        out_features=None,
        act_layer=nn.GELU,
        drop=0.0,
    ):
        super().__init__()
        hidden_features = hidden_features or in_features
        out_features = out_features or in_features
        self.fc1 = FullyConnectedLayer(
            in_features=in_features, out_features=hidden_features, activation="lrelu"
        )
        self.fc2 = FullyConnectedLayer(
            in_features=hidden_features, out_features=out_features
        )

    def forward(self, x):
        """fc1 (lrelu) followed by fc2 (linear)."""
        return self.fc2(self.fc1(x))
def window_partition(x, window_size):
    """Split a feature map into non-overlapping square windows.

    Args:
        x: (B, H, W, C); H and W must be divisible by ``window_size``.
        window_size (int): window side length.

    Returns:
        windows: (num_windows * B, window_size, window_size, C), ordered
        batch-major, then window rows, then window columns.
    """
    B, H, W, C = x.shape
    n_rows, n_cols = H // window_size, W // window_size
    tiled = x.view(B, n_rows, window_size, n_cols, window_size, C)
    # Bring the two window-grid axes together, then flatten them with B.
    tiled = tiled.permute(0, 1, 3, 2, 4, 5).contiguous()
    return tiled.view(-1, window_size, window_size, C)
def window_reverse(windows, window_size: int, H: int, W: int):
    """Inverse of ``window_partition``: reassemble windows into a feature map.

    Args:
        windows: (num_windows * B, window_size, window_size, C)
        window_size (int): window side length.
        H (int): height of the original feature map.
        W (int): width of the original feature map.

    Returns:
        x: (B, H, W, C)
    """
    n_rows, n_cols = H // window_size, W // window_size
    B = windows.shape[0] // (n_rows * n_cols)
    grid = windows.view(B, n_rows, n_cols, window_size, window_size, -1)
    # Interleave window-grid and intra-window axes back into H and W.
    grid = grid.permute(0, 1, 3, 2, 4, 5).contiguous()
    return grid.view(B, H, W, -1)
class Conv2dLayerPartial(nn.Module):
    """Partial convolution layer: a Conv2dLayer whose output is renormalized
    by the fraction of valid (unmasked) pixels under each kernel window,
    returned together with the updated validity mask."""

    def __init__(
        self,
        in_channels,  # Number of input channels.
        out_channels,  # Number of output channels.
        kernel_size,  # Width and height of the convolution kernel.
        bias=True,  # Apply additive bias before the activation function?
        activation="linear",  # Activation function: 'relu', 'lrelu', etc.
        up=1,  # Integer upsampling factor.
        down=1,  # Integer downsampling factor.
        resample_filter=[
            1,
            3,
            3,
            1,
        ],  # Low-pass filter to apply when resampling activations.
        conv_clamp=None,  # Clamp the output to +-X, None = disable clamping.
        trainable=True,  # Update the weights of this layer during training?
    ):
        super().__init__()
        self.conv = Conv2dLayer(
            in_channels,
            out_channels,
            kernel_size,
            bias,
            activation,
            up,
            down,
            resample_filter,
            conv_clamp,
            trainable,
        )

        # All-ones kernel used to count valid input pixels per output pixel.
        self.weight_maskUpdater = torch.ones(1, 1, kernel_size, kernel_size)
        self.slide_winsize = kernel_size ** 2
        self.stride = down
        self.padding = kernel_size // 2 if kernel_size % 2 == 1 else 0

    def forward(self, x, mask=None):
        """Returns (features, updated_mask); mask is 1 = valid, 0 = hole.
        With ``mask=None`` this degenerates to a plain convolution."""
        if mask is not None:
            with torch.no_grad():
                if self.weight_maskUpdater.type() != x.type():
                    self.weight_maskUpdater = self.weight_maskUpdater.to(x)
                update_mask = F.conv2d(
                    mask,
                    self.weight_maskUpdater,
                    bias=None,
                    stride=self.stride,
                    padding=self.padding,
                )
                # Window size over valid-pixel count (eps avoids divide-by-0).
                mask_ratio = self.slide_winsize / (update_mask.to(torch.float32) + 1e-8)
                update_mask = torch.clamp(update_mask, 0, 1)  # 0 or 1
                mask_ratio = torch.mul(mask_ratio, update_mask).to(x.dtype)
            x = self.conv(x)
            # Renormalize outputs by valid coverage; zero fully-hole pixels.
            x = torch.mul(x, mask_ratio)
            return x, update_mask
        else:
            x = self.conv(x)
            return x, None
class WindowAttention(nn.Module):
    r"""Window based multi-head self attention (W-MSA) module with relative position bias.
    It supports both of shifted and non-shifted window.

    Args:
        dim (int): Number of input channels.
        window_size (tuple[int]): The height and width of the window.
        num_heads (int): Number of attention heads.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set
        attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
        proj_drop (float, optional): Dropout ratio of output. Default: 0.0

    NOTE(review): qkv_bias / attn_drop / proj_drop / down_ratio are accepted
    but not used by this implementation; q/k/v are separate
    FullyConnectedLayers and no dropout is applied.
    """

    def __init__(
        self,
        dim,
        window_size,
        num_heads,
        down_ratio=1,
        qkv_bias=True,
        qk_scale=None,
        attn_drop=0.0,
        proj_drop=0.0,
    ):
        super().__init__()
        self.dim = dim
        self.window_size = window_size  # Wh, Ww
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim ** -0.5

        self.q = FullyConnectedLayer(in_features=dim, out_features=dim)
        self.k = FullyConnectedLayer(in_features=dim, out_features=dim)
        self.v = FullyConnectedLayer(in_features=dim, out_features=dim)
        self.proj = FullyConnectedLayer(in_features=dim, out_features=dim)

        self.softmax = nn.Softmax(dim=-1)

    def forward(self, x, mask_windows=None, mask=None):
        """
        Args:
            x: input features with shape of (num_windows*B, N, C)
            mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
        """
        B_, N, C = x.shape
        # q/k are computed from L2-normalized features (cosine-style attention);
        # v uses the raw features.
        norm_x = F.normalize(x, p=2.0, dim=-1, eps=torch.finfo(x.dtype).eps)
        q = (
            self.q(norm_x)
            .reshape(B_, N, self.num_heads, C // self.num_heads)
            .permute(0, 2, 1, 3)
        )
        k = (
            self.k(norm_x)
            .view(B_, -1, self.num_heads, C // self.num_heads)
            .permute(0, 2, 3, 1)
        )
        v = (
            self.v(x)
            .view(B_, -1, self.num_heads, C // self.num_heads)
            .permute(0, 2, 1, 3)
        )

        attn = (q @ k) * self.scale

        if mask is not None:
            # Additive shifted-window attention mask (0 / -inf pattern).
            nW = mask.shape[0]
            attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(
                1
            ).unsqueeze(0)
            attn = attn.view(-1, self.num_heads, N, N)

        if mask_windows is not None:
            # Down-weight attention onto invalid (hole) tokens by -100,
            # then mark a window fully valid if it contains any valid token.
            attn_mask_windows = mask_windows.squeeze(-1).unsqueeze(1).unsqueeze(1)
            attn = attn + attn_mask_windows.masked_fill(
                attn_mask_windows == 0, float(-100.0)
            ).masked_fill(attn_mask_windows == 1, float(0.0))
            with torch.no_grad():
                mask_windows = torch.clamp(
                    torch.sum(mask_windows, dim=1, keepdim=True), 0, 1
                ).repeat(1, N, 1)

        attn = self.softmax(attn)

        x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
        x = self.proj(x)
        return x, mask_windows
class SwinTransformerBlock(nn.Module):
    r"""Swin Transformer Block.

    One (shifted-)window attention block followed by a fuse layer and an MLP,
    with optional propagation of a validity mask through the attention.

    Args:
        dim (int): Number of input channels.
        input_resolution (tuple[int]): Input resolution.
        num_heads (int): Number of attention heads.
        window_size (int): Window size.
        shift_size (int): Shift size for SW-MSA.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
        drop (float, optional): Dropout rate. Default: 0.0
        attn_drop (float, optional): Attention dropout rate. Default: 0.0
        drop_path (float, optional): Stochastic depth rate. Default: 0.0
        act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
    """
    def __init__(
        self,
        dim,
        input_resolution,
        num_heads,
        down_ratio=1,
        window_size=7,
        shift_size=0,
        mlp_ratio=4.0,
        qkv_bias=True,
        qk_scale=None,
        drop=0.0,
        attn_drop=0.0,
        drop_path=0.0,
        act_layer=nn.GELU,
        norm_layer=nn.LayerNorm,
    ):
        super().__init__()
        self.dim = dim
        self.input_resolution = input_resolution
        self.num_heads = num_heads
        self.window_size = window_size
        self.shift_size = shift_size
        self.mlp_ratio = mlp_ratio
        if min(self.input_resolution) <= self.window_size:
            # if window size is larger than input resolution, we don't partition windows
            self.shift_size = 0
            self.window_size = min(self.input_resolution)
        assert (
            0 <= self.shift_size < self.window_size
        ), "shift_size must in 0-window_size"
        if self.shift_size > 0:
            # Shifted blocks do not use attention downsampling.
            down_ratio = 1
        self.attn = WindowAttention(
            dim,
            window_size=to_2tuple(self.window_size),
            num_heads=num_heads,
            down_ratio=down_ratio,
            qkv_bias=qkv_bias,
            qk_scale=qk_scale,
            attn_drop=attn_drop,
            proj_drop=drop,
        )
        # Fuses the residual shortcut with the attention output (concat -> dim).
        self.fuse = FullyConnectedLayer(
            in_features=dim * 2, out_features=dim, activation="lrelu"
        )
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(
            in_features=dim,
            hidden_features=mlp_hidden_dim,
            act_layer=act_layer,
            drop=drop,
        )
        if self.shift_size > 0:
            # Precompute the SW-MSA mask for the nominal input resolution.
            attn_mask = self.calculate_mask(self.input_resolution)
        else:
            attn_mask = None
        self.register_buffer("attn_mask", attn_mask)
    def calculate_mask(self, x_size):
        """Build the (0/-100) attention mask for SW-MSA at resolution ``x_size``."""
        # calculate attention mask for SW-MSA
        H, W = x_size
        img_mask = torch.zeros((1, H, W, 1))  # 1 H W 1
        # Partition the image into 3x3 regions induced by the cyclic shift;
        # tokens from different regions must not attend to each other.
        h_slices = (
            slice(0, -self.window_size),
            slice(-self.window_size, -self.shift_size),
            slice(-self.shift_size, None),
        )
        w_slices = (
            slice(0, -self.window_size),
            slice(-self.window_size, -self.shift_size),
            slice(-self.shift_size, None),
        )
        cnt = 0
        for h in h_slices:
            for w in w_slices:
                img_mask[:, h, w, :] = cnt
                cnt += 1
        mask_windows = window_partition(
            img_mask, self.window_size
        )  # nW, window_size, window_size, 1
        mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
        # Non-zero differences mark cross-region token pairs.
        attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
        attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(
            attn_mask == 0, float(0.0)
        )
        return attn_mask
    def forward(self, x, x_size, mask=None):
        """Apply (shifted-)window attention to tokens ``x`` of spatial size ``x_size``.

        Returns the transformed tokens and the (possibly updated) mask.
        """
        # H, W = self.input_resolution
        H, W = x_size
        B, L, C = x.shape
        # assert L == H * W, "input feature has wrong size"
        shortcut = x
        x = x.view(B, H, W, C)
        if mask is not None:
            mask = mask.view(B, H, W, 1)
        # cyclic shift
        if self.shift_size > 0:
            shifted_x = torch.roll(
                x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)
            )
            if mask is not None:
                shifted_mask = torch.roll(
                    mask, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)
                )
        else:
            shifted_x = x
            if mask is not None:
                shifted_mask = mask
        # partition windows
        x_windows = window_partition(
            shifted_x, self.window_size
        )  # nW*B, window_size, window_size, C
        x_windows = x_windows.view(
            -1, self.window_size * self.window_size, C
        )  # nW*B, window_size*window_size, C
        if mask is not None:
            mask_windows = window_partition(shifted_mask, self.window_size)
            mask_windows = mask_windows.view(-1, self.window_size * self.window_size, 1)
        else:
            mask_windows = None
        # W-MSA/SW-MSA (to be compatible for testing on images whose shapes are the multiple of window size
        if self.input_resolution == x_size:
            # Nominal resolution: reuse the precomputed buffer.
            attn_windows, mask_windows = self.attn(
                x_windows, mask_windows, mask=self.attn_mask
            )  # nW*B, window_size*window_size, C
        else:
            # Different resolution at inference time: rebuild the mask on the fly.
            attn_windows, mask_windows = self.attn(
                x_windows,
                mask_windows,
                mask=self.calculate_mask(x_size).to(x.dtype).to(x.device),
            )  # nW*B, window_size*window_size, C
        # merge windows
        attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
        shifted_x = window_reverse(attn_windows, self.window_size, H, W)  # B H' W' C
        if mask is not None:
            mask_windows = mask_windows.view(-1, self.window_size, self.window_size, 1)
            shifted_mask = window_reverse(mask_windows, self.window_size, H, W)
        # reverse cyclic shift
        if self.shift_size > 0:
            x = torch.roll(
                shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2)
            )
            if mask is not None:
                mask = torch.roll(
                    shifted_mask, shifts=(self.shift_size, self.shift_size), dims=(1, 2)
                )
        else:
            x = shifted_x
            if mask is not None:
                mask = shifted_mask
        x = x.view(B, H * W, C)
        if mask is not None:
            mask = mask.view(B, H * W, 1)
        # FFN
        # NOTE: the residual is merged through a learned fuse layer instead of
        # a plain addition, and no LayerNorm is applied here.
        x = self.fuse(torch.cat([shortcut, x], dim=-1))
        x = self.mlp(x)
        return x, mask
class PatchMerging(nn.Module):
    """Downsample a token map with a strided partial convolution.

    Tokens are reshaped to a feature map, convolved (stride ``down``), and
    flattened back to tokens; ``x_size`` shrinks by the same factor.
    """

    def __init__(self, in_channels, out_channels, down=2):
        super().__init__()
        self.conv = Conv2dLayerPartial(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=3,
            activation="lrelu",
            down=down,
        )
        self.down = down

    def forward(self, x, x_size, mask=None):
        feat = token2feature(x, x_size)
        mask_feat = None if mask is None else token2feature(mask, x_size)
        feat, mask_feat = self.conv(feat, mask_feat)
        if self.down != 1:
            ratio = 1 / self.down
            x_size = (int(x_size[0] * ratio), int(x_size[1] * ratio))
        out = feature2token(feat)
        out_mask = None if mask_feat is None else feature2token(mask_feat)
        return out, x_size, out_mask
class PatchUpsampling(nn.Module):
    """Upsample a token map with a transposed-style partial convolution.

    Mirrors :class:`PatchMerging` but scales ``x_size`` up by ``up``.
    """

    def __init__(self, in_channels, out_channels, up=2):
        super().__init__()
        self.conv = Conv2dLayerPartial(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=3,
            activation="lrelu",
            up=up,
        )
        self.up = up

    def forward(self, x, x_size, mask=None):
        feat = token2feature(x, x_size)
        mask_feat = None if mask is None else token2feature(mask, x_size)
        feat, mask_feat = self.conv(feat, mask_feat)
        if self.up != 1:
            x_size = (int(x_size[0] * self.up), int(x_size[1] * self.up))
        out = feature2token(feat)
        out_mask = None if mask_feat is None else feature2token(mask_feat)
        return out, x_size, out_mask
class BasicLayer(nn.Module):
    """A basic Swin Transformer layer for one stage.

    Optionally resamples the tokens first (``downsample`` may also be an
    upsampling module), runs ``depth`` Swin blocks, then applies a partial
    conv with a residual connection back to the pre-block tokens.

    Args:
        dim (int): Number of input channels.
        input_resolution (tuple[int]): Input resolution.
        depth (int): Number of blocks.
        num_heads (int): Number of attention heads.
        window_size (int): Local window size.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
        drop (float, optional): Dropout rate. Default: 0.0
        attn_drop (float, optional): Attention dropout rate. Default: 0.0
        drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
        downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
        use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
    """
    def __init__(
        self,
        dim,
        input_resolution,
        depth,
        num_heads,
        window_size,
        down_ratio=1,
        mlp_ratio=2.0,
        qkv_bias=True,
        qk_scale=None,
        drop=0.0,
        attn_drop=0.0,
        drop_path=0.0,
        norm_layer=nn.LayerNorm,
        downsample=None,
        use_checkpoint=False,
    ):
        super().__init__()
        self.dim = dim
        self.input_resolution = input_resolution
        self.depth = depth
        self.use_checkpoint = use_checkpoint
        # patch merging layer
        # NOTE: unlike upstream Swin, ``downsample`` is an already-constructed
        # module (PatchMerging/PatchUpsampling), not a class to instantiate.
        if downsample is not None:
            # self.downsample = downsample(input_resolution, dim=dim, norm_layer=norm_layer)
            self.downsample = downsample
        else:
            self.downsample = None
        # build blocks
        self.blocks = nn.ModuleList(
            [
                SwinTransformerBlock(
                    dim=dim,
                    input_resolution=input_resolution,
                    num_heads=num_heads,
                    down_ratio=down_ratio,
                    # Alternate between W-MSA and SW-MSA blocks.
                    shift_size=0 if (i % 2 == 0) else window_size // 2,
                    window_size=window_size,
                    mlp_ratio=mlp_ratio,
                    qkv_bias=qkv_bias,
                    qk_scale=qk_scale,
                    drop=drop,
                    attn_drop=attn_drop,
                    drop_path=drop_path[i]
                    if isinstance(drop_path, list)
                    else drop_path,
                    norm_layer=norm_layer,
                )
                for i in range(depth)
            ]
        )
        # Post-stage partial conv used for the stage-level residual.
        self.conv = Conv2dLayerPartial(
            in_channels=dim, out_channels=dim, kernel_size=3, activation="lrelu"
        )
    def forward(self, x, x_size, mask=None):
        """Run the stage; returns ``(tokens, x_size, mask)`` (mask may be None)."""
        if self.downsample is not None:
            x, x_size, mask = self.downsample(x, x_size, mask)
        identity = x
        for blk in self.blocks:
            if self.use_checkpoint:
                # Trade compute for memory during training.
                x, mask = checkpoint.checkpoint(blk, x, x_size, mask)
            else:
                x, mask = blk(x, x_size, mask)
        if mask is not None:
            mask = token2feature(mask, x_size)
        x, mask = self.conv(token2feature(x, x_size), mask)
        # Stage-level residual connection in token space.
        x = feature2token(x) + identity
        if mask is not None:
            mask = feature2token(mask)
        return x, x_size, mask
class ToToken(nn.Module):
    """Project an image (plus mask) into the token embedding space."""

    def __init__(self, in_channels=3, dim=128, kernel_size=5, stride=1):
        super().__init__()
        self.proj = Conv2dLayerPartial(
            in_channels=in_channels,
            out_channels=dim,
            kernel_size=kernel_size,
            activation="lrelu",
        )

    def forward(self, x, mask):
        # Partial conv returns (features, updated_mask).
        return self.proj(x, mask)
class EncFromRGB(nn.Module):
    """Encoder entry block: a 1x1 channel lift followed by a 3x3 conv."""

    def __init__(
        self, in_channels, out_channels, activation
    ):  # res = 2, ..., resolution_log2
        super().__init__()
        self.conv0 = Conv2dLayer(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=1,
            activation=activation,
        )
        self.conv1 = Conv2dLayer(
            in_channels=out_channels,
            out_channels=out_channels,
            kernel_size=3,
            activation=activation,
        )

    def forward(self, x):
        return self.conv1(self.conv0(x))
class ConvBlockDown(nn.Module):
    """Encoder block: stride-2 downsampling conv followed by a refining conv."""

    def __init__(
        self, in_channels, out_channels, activation
    ):  # res = 2, ..., resolution_log
        super().__init__()
        self.conv0 = Conv2dLayer(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=3,
            activation=activation,
            down=2,
        )
        self.conv1 = Conv2dLayer(
            in_channels=out_channels,
            out_channels=out_channels,
            kernel_size=3,
            activation=activation,
        )

    def forward(self, x):
        return self.conv1(self.conv0(x))
def token2feature(x, x_size):
    """Reshape a token sequence ``(B, N, C)`` into a feature map ``(B, C, H, W)``.

    ``x_size`` is the ``(H, W)`` pair with ``H * W == N``.
    """
    batch, _, channels = x.shape
    height, width = x_size
    return x.permute(0, 2, 1).reshape(batch, channels, height, width)
def feature2token(x):
    """Flatten a feature map ``(B, C, H, W)`` into a token sequence ``(B, H*W, C)``."""
    batch, channels = x.shape[0], x.shape[1]
    return x.view(batch, channels, -1).transpose(1, 2)
class Encoder(nn.Module):
    """Convolutional pyramid mapping the input image down to 16x16 features.

    One block per resolution from the input size down to 16x16; ``forward``
    returns the per-resolution feature maps keyed by ``log2(resolution)``.
    """

    def __init__(
        self,
        res_log2,
        img_channels,
        activation,
        patch_size=5,
        channels=16,
        drop_path_rate=0.1,
    ):
        super().__init__()
        self.resolution = []
        for log_res in range(res_log2, 3, -1):  # from input size to 16x16
            res = 2 ** log_res
            self.resolution.append(res)
            if log_res == res_log2:
                # First block consumes the (mask, image, masked image) stack.
                block = EncFromRGB(img_channels * 2 + 1, nf(log_res), activation)
            else:
                block = ConvBlockDown(nf(log_res + 1), nf(log_res), activation)
            setattr(self, "EncConv_Block_%dx%d" % (res, res), block)

    def forward(self, x):
        features = {}
        for res in self.resolution:
            log_res = int(np.log2(res))
            x = getattr(self, "EncConv_Block_%dx%d" % (res, res))(x)
            features[log_res] = x
        return features
class ToStyle(nn.Module):
    """Squeeze a feature map into a global style vector.

    Three stride-2 convs, global average pooling, then a fully connected
    projection to ``out_channels``. ``drop_rate`` is accepted but unused
    (the dropout is disabled).
    """

    def __init__(self, in_channels, out_channels, activation, drop_rate):
        super().__init__()
        reducers = [
            Conv2dLayer(
                in_channels=in_channels,
                out_channels=in_channels,
                kernel_size=3,
                activation=activation,
                down=2,
            )
            for _ in range(3)
        ]
        self.conv = nn.Sequential(*reducers)
        self.pool = nn.AdaptiveAvgPool2d(1)
        self.fc = FullyConnectedLayer(
            in_features=in_channels, out_features=out_channels, activation=activation
        )
        # self.dropout = nn.Dropout(drop_rate)

    def forward(self, x):
        pooled = self.pool(self.conv(x))
        # x = self.dropout(x)
        return self.fc(pooled.flatten(start_dim=1))
class DecBlockFirstV2(nn.Module):
    """First decoder block (16x16): plain conv, styled conv, then an RGB head."""

    def __init__(
        self,
        res,
        in_channels,
        out_channels,
        activation,
        style_dim,
        use_noise,
        demodulate,
        img_channels,
    ):
        super().__init__()
        self.res = res
        self.conv0 = Conv2dLayer(
            in_channels=in_channels,
            out_channels=in_channels,
            kernel_size=3,
            activation=activation,
        )
        self.conv1 = StyleConv(
            in_channels=in_channels,
            out_channels=out_channels,
            style_dim=style_dim,
            resolution=2 ** res,
            kernel_size=3,
            use_noise=use_noise,
            activation=activation,
            demodulate=demodulate,
        )
        self.toRGB = ToRGB(
            in_channels=out_channels,
            out_channels=img_channels,
            style_dim=style_dim,
            kernel_size=1,
            demodulate=False,
        )

    def forward(self, x, ws, gs, E_features, noise_mode="random"):
        """Fuse encoder features at this resolution and emit the first RGB image."""
        feat = self.conv0(x) + E_features[self.res]
        feat = self.conv1(feat, get_style_code(ws[:, 0], gs), noise_mode=noise_mode)
        img = self.toRGB(feat, get_style_code(ws[:, 1], gs), skip=None)
        return feat, img
class DecBlock(nn.Module):
    """Decoder block: 2x styled upsampling conv, encoder skip, refine, RGB head."""

    def __init__(
        self,
        res,
        in_channels,
        out_channels,
        activation,
        style_dim,
        use_noise,
        demodulate,
        img_channels,
    ):  # res = 4, ..., resolution_log2
        super().__init__()
        self.res = res
        self.conv0 = StyleConv(
            in_channels=in_channels,
            out_channels=out_channels,
            style_dim=style_dim,
            resolution=2 ** res,
            kernel_size=3,
            up=2,
            use_noise=use_noise,
            activation=activation,
            demodulate=demodulate,
        )
        self.conv1 = StyleConv(
            in_channels=out_channels,
            out_channels=out_channels,
            style_dim=style_dim,
            resolution=2 ** res,
            kernel_size=3,
            use_noise=use_noise,
            activation=activation,
            demodulate=demodulate,
        )
        self.toRGB = ToRGB(
            in_channels=out_channels,
            out_channels=img_channels,
            style_dim=style_dim,
            kernel_size=1,
            demodulate=False,
        )

    def forward(self, x, img, ws, gs, E_features, noise_mode="random"):
        # Three consecutive ws entries are consumed per resolution level.
        base = self.res * 2
        x = self.conv0(x, get_style_code(ws[:, base - 9], gs), noise_mode=noise_mode)
        x = x + E_features[self.res]
        x = self.conv1(x, get_style_code(ws[:, base - 8], gs), noise_mode=noise_mode)
        img = self.toRGB(x, get_style_code(ws[:, base - 7], gs), skip=img)
        return x, img
class Decoder(nn.Module):
    """Stack of decoder blocks from 16x16 up to the output resolution."""

    def __init__(
        self, res_log2, activation, style_dim, use_noise, demodulate, img_channels
    ):
        super().__init__()
        self.Dec_16x16 = DecBlockFirstV2(
            4, nf(4), nf(4), activation, style_dim, use_noise, demodulate, img_channels
        )
        for res in range(5, res_log2 + 1):
            side = 2 ** res
            block = DecBlock(
                res,
                nf(res - 1),
                nf(res),
                activation,
                style_dim,
                use_noise,
                demodulate,
                img_channels,
            )
            setattr(self, "Dec_%dx%d" % (side, side), block)
        self.res_log2 = res_log2

    def forward(self, x, ws, gs, E_features, noise_mode="random"):
        x, img = self.Dec_16x16(x, ws, gs, E_features, noise_mode=noise_mode)
        for res in range(5, self.res_log2 + 1):
            side = 2 ** res
            block = getattr(self, "Dec_%dx%d" % (side, side))
            x, img = block(x, img, ws, gs, E_features, noise_mode=noise_mode)
        return img
class DecStyleBlock(nn.Module):
    """First-stage decoder block: styled upsample, skip add, refine, RGB head.

    Unlike :class:`DecBlock`, a single style vector drives all three convs.
    """

    def __init__(
        self,
        res,
        in_channels,
        out_channels,
        activation,
        style_dim,
        use_noise,
        demodulate,
        img_channels,
    ):
        super().__init__()
        self.res = res
        self.conv0 = StyleConv(
            in_channels=in_channels,
            out_channels=out_channels,
            style_dim=style_dim,
            resolution=2 ** res,
            kernel_size=3,
            up=2,
            use_noise=use_noise,
            activation=activation,
            demodulate=demodulate,
        )
        self.conv1 = StyleConv(
            in_channels=out_channels,
            out_channels=out_channels,
            style_dim=style_dim,
            resolution=2 ** res,
            kernel_size=3,
            use_noise=use_noise,
            activation=activation,
            demodulate=demodulate,
        )
        self.toRGB = ToRGB(
            in_channels=out_channels,
            out_channels=img_channels,
            style_dim=style_dim,
            kernel_size=1,
            demodulate=False,
        )

    def forward(self, x, img, style, skip, noise_mode="random"):
        """Upsample, add the encoder skip, refine, and accumulate the RGB image."""
        feat = self.conv0(x, style, noise_mode=noise_mode) + skip
        feat = self.conv1(feat, style, noise_mode=noise_mode)
        return feat, self.toRGB(feat, style, skip=img)
class FirstStage(nn.Module):
    """Coarse inpainting stage of MAT.

    Conv encoder (input size -> 64), a U-shaped stack of Swin stages
    (64 -> 16 -> 64) with style injection at the bottleneck, then a styled
    conv decoder back to the input size. Returns an RGB image with known
    pixels pasted back from the input.
    """
    def __init__(
        self,
        img_channels,
        img_resolution=256,
        dim=180,
        w_dim=512,
        use_noise=False,
        demodulate=True,
        activation="lrelu",
    ):
        super().__init__()
        res = 64
        self.conv_first = Conv2dLayerPartial(
            in_channels=img_channels + 1,
            out_channels=dim,
            kernel_size=3,
            activation=activation,
        )
        self.enc_conv = nn.ModuleList()
        down_time = int(np.log2(img_resolution // res))
        # Build the Swin transformer stages according to the image size.
        for i in range(down_time):  # from input size to 64
            self.enc_conv.append(
                Conv2dLayerPartial(
                    in_channels=dim,
                    out_channels=dim,
                    kernel_size=3,
                    down=2,
                    activation=activation,
                )
            )
        # from 64 -> 16 -> 64
        depths = [2, 3, 4, 3, 2]
        ratios = [1, 1 / 2, 1 / 2, 2, 2]
        num_heads = 6
        window_sizes = [8, 16, 16, 16, 8]
        drop_path_rate = 0.1
        # Linearly increasing stochastic-depth rates across all blocks.
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
        self.tran = nn.ModuleList()
        for i, depth in enumerate(depths):
            res = int(res * ratios[i])
            if ratios[i] < 1:
                merge = PatchMerging(dim, dim, down=int(1 / ratios[i]))
            elif ratios[i] > 1:
                merge = PatchUpsampling(dim, dim, up=ratios[i])
            else:
                merge = None
            self.tran.append(
                BasicLayer(
                    dim=dim,
                    input_resolution=[res, res],
                    depth=depth,
                    num_heads=num_heads,
                    window_size=window_sizes[i],
                    drop_path=dpr[sum(depths[:i]) : sum(depths[: i + 1])],
                    downsample=merge,
                )
            )
        # global style
        down_conv = []
        for i in range(int(np.log2(16))):
            down_conv.append(
                Conv2dLayer(
                    in_channels=dim,
                    out_channels=dim,
                    kernel_size=3,
                    down=2,
                    activation=activation,
                )
            )
        down_conv.append(nn.AdaptiveAvgPool2d((1, 1)))
        self.down_conv = nn.Sequential(*down_conv)
        self.to_style = FullyConnectedLayer(
            in_features=dim, out_features=dim * 2, activation=activation
        )
        self.ws_style = FullyConnectedLayer(
            in_features=w_dim, out_features=dim, activation=activation
        )
        self.to_square = FullyConnectedLayer(
            in_features=dim, out_features=16 * 16, activation=activation
        )
        style_dim = dim * 3
        self.dec_conv = nn.ModuleList()
        for i in range(down_time):  # from 64 to input size
            res = res * 2
            self.dec_conv.append(
                DecStyleBlock(
                    res,
                    dim,
                    dim,
                    activation,
                    style_dim,
                    use_noise,
                    demodulate,
                    img_channels,
                )
            )
    def forward(self, images_in, masks_in, ws, noise_mode="random"):
        # masks_in == 1 marks known pixels (the generator keeps them at the end).
        x = torch.cat([masks_in - 0.5, images_in * masks_in], dim=1)
        skips = []
        x, mask = self.conv_first(x, masks_in)  # input size
        skips.append(x)
        for i, block in enumerate(self.enc_conv):  # input size to 64
            x, mask = block(x, mask)
            if i != len(self.enc_conv) - 1:
                skips.append(x)
        x_size = x.size()[-2:]
        x = feature2token(x)
        mask = feature2token(mask)
        mid = len(self.tran) // 2
        for i, block in enumerate(self.tran):  # 64 to 16
            if i < mid:
                x, x_size, mask = block(x, x_size, mask)
                skips.append(x)
            elif i > mid:
                x, x_size, mask = block(x, x_size, None)
                # U-shape skip: for i > mid, (mid - i) is negative, so this
                # indexes the matching encoder-side stage from the end of skips.
                x = x + skips[mid - i]
            else:
                # Bottleneck stage: randomly blend latent-derived noise into the
                # tokens (dropout keeps the blend stochastic even at eval time),
                # then derive the global style from the blended features.
                x, x_size, mask = block(x, x_size, None)
                mul_map = torch.ones_like(x) * 0.5
                mul_map = F.dropout(mul_map, training=True)
                ws = self.ws_style(ws[:, -1])
                add_n = self.to_square(ws).unsqueeze(1)
                add_n = (
                    F.interpolate(
                        add_n, size=x.size(1), mode="linear", align_corners=False
                    )
                    .squeeze(1)
                    .unsqueeze(-1)
                )
                x = x * mul_map + add_n * (1 - mul_map)
                gs = self.to_style(
                    self.down_conv(token2feature(x, x_size)).flatten(start_dim=1)
                )
                style = torch.cat([gs, ws], dim=1)
        x = token2feature(x, x_size).contiguous()
        img = None
        for i, block in enumerate(self.dec_conv):
            x, img = block(
                x, img, style, skips[len(self.dec_conv) - i - 1], noise_mode=noise_mode
            )
        # ensemble: paste the known pixels back over the prediction
        img = img * (1 - masks_in) + images_in * masks_in
        return img
class SynthesisNet(nn.Module):
    """Two-stage MAT synthesis network.

    Stage one (:class:`FirstStage`) produces a coarse inpainting; stage two
    re-encodes the composite, injects latent noise at the 16x16 bottleneck,
    and decodes a refined image. Known pixels are always pasted back.
    """
    def __init__(
        self,
        w_dim,  # Intermediate latent (W) dimensionality.
        img_resolution,  # Output image resolution.
        img_channels=3,  # Number of color channels.
        channel_base=32768,  # Overall multiplier for the number of channels.
        channel_decay=1.0,
        channel_max=512,  # Maximum number of channels in any layer.
        activation="lrelu",  # Activation function: 'relu', 'lrelu', etc.
        drop_rate=0.5,
        use_noise=False,
        demodulate=True,
    ):
        super().__init__()
        resolution_log2 = int(np.log2(img_resolution))
        assert img_resolution == 2 ** resolution_log2 and img_resolution >= 4
        # Number of ws entries the mapping network must produce.
        self.num_layers = resolution_log2 * 2 - 3 * 2
        self.img_resolution = img_resolution
        self.resolution_log2 = resolution_log2
        # first stage
        self.first_stage = FirstStage(
            img_channels,
            img_resolution=img_resolution,
            w_dim=w_dim,
            use_noise=False,
            demodulate=demodulate,
        )
        # second stage
        self.enc = Encoder(
            resolution_log2, img_channels, activation, patch_size=5, channels=16
        )
        self.to_square = FullyConnectedLayer(
            in_features=w_dim, out_features=16 * 16, activation=activation
        )
        self.to_style = ToStyle(
            in_channels=nf(4),
            out_channels=nf(2) * 2,
            activation=activation,
            drop_rate=drop_rate,
        )
        style_dim = w_dim + nf(2) * 2
        self.dec = Decoder(
            resolution_log2, activation, style_dim, use_noise, demodulate, img_channels
        )
    def forward(self, images_in, masks_in, ws, noise_mode="random", return_stg1=False):
        out_stg1 = self.first_stage(images_in, masks_in, ws, noise_mode=noise_mode)
        # encoder: composite of known pixels and the stage-1 prediction
        x = images_in * masks_in + out_stg1 * (1 - masks_in)
        x = torch.cat([masks_in - 0.5, x, images_in * masks_in], dim=1)
        E_features = self.enc(x)
        fea_16 = E_features[4]
        # Stochastically blend latent-derived noise into the 16x16 features
        # (dropout keeps the blend random even in eval mode).
        mul_map = torch.ones_like(fea_16) * 0.5
        mul_map = F.dropout(mul_map, training=True)
        add_n = self.to_square(ws[:, 0]).view(-1, 16, 16).unsqueeze(1)
        add_n = F.interpolate(
            add_n, size=fea_16.size()[-2:], mode="bilinear", align_corners=False
        )
        fea_16 = fea_16 * mul_map + add_n * (1 - mul_map)
        E_features[4] = fea_16
        # style
        gs = self.to_style(fea_16)
        # decoder
        img = self.dec(fea_16, ws, gs, E_features, noise_mode=noise_mode)
        # ensemble: paste the known pixels back over the prediction
        img = img * (1 - masks_in) + images_in * masks_in
        if not return_stg1:
            return img
        else:
            return img, out_stg1
class Generator(nn.Module):
    """MAT generator: a mapping network producing ``ws`` plus the two-stage
    synthesis network.

    Fix: the original used mutable default arguments (``synthesis_kwargs={}``,
    ``mapping_kwargs={}``); defaults are now ``None`` and normalized inside,
    which is backward-compatible for all callers.
    """

    def __init__(
        self,
        z_dim,  # Input latent (Z) dimensionality, 0 = no latent.
        c_dim,  # Conditioning label (C) dimensionality, 0 = no label.
        w_dim,  # Intermediate latent (W) dimensionality.
        img_resolution,  # resolution of generated image
        img_channels,  # Number of input color channels.
        synthesis_kwargs=None,  # Arguments for SynthesisNetwork.
        mapping_kwargs=None,  # Arguments for MappingNetwork.
    ):
        super().__init__()
        # Avoid the shared-mutable-default pitfall: normalize None -> {}.
        synthesis_kwargs = synthesis_kwargs or {}
        mapping_kwargs = mapping_kwargs or {}
        self.z_dim = z_dim
        self.c_dim = c_dim
        self.w_dim = w_dim
        self.img_resolution = img_resolution
        self.img_channels = img_channels
        self.synthesis = SynthesisNet(
            w_dim=w_dim,
            img_resolution=img_resolution,
            img_channels=img_channels,
            **synthesis_kwargs,
        )
        self.mapping = MappingNet(
            z_dim=z_dim,
            c_dim=c_dim,
            w_dim=w_dim,
            num_ws=self.synthesis.num_layers,
            **mapping_kwargs,
        )

    def forward(
        self,
        images_in,
        masks_in,
        z,
        c,
        truncation_psi=1,
        truncation_cutoff=None,
        skip_w_avg_update=False,
        noise_mode="none",
        return_stg1=False,
    ):
        """Map (z, c) to ws and synthesize the inpainted image.

        NOTE(review): ``return_stg1`` is accepted but not forwarded to the
        synthesis network here — kept for interface compatibility.
        """
        ws = self.mapping(
            z,
            c,
            truncation_psi=truncation_psi,
            truncation_cutoff=truncation_cutoff,
            skip_w_avg_update=skip_w_avg_update,
        )
        img = self.synthesis(images_in, masks_in, ws, noise_mode=noise_mode)
        return img
class Discriminator(torch.nn.Module):
    """Two-branch MAT discriminator.

    A full-width branch scores the final image and a half-width branch scores
    the stage-1 image; both optionally project onto a mapped conditioning
    label (projection-discriminator style).

    Fix: ``if cmap_dim == None`` replaced with the idiomatic and correct
    identity check ``is None`` (PEP 8).
    """

    def __init__(
        self,
        c_dim,  # Conditioning label (C) dimensionality.
        img_resolution,  # Input resolution.
        img_channels,  # Number of input color channels.
        channel_base=32768,  # Overall multiplier for the number of channels.
        channel_max=512,  # Maximum number of channels in any layer.
        channel_decay=1,
        cmap_dim=None,  # Dimensionality of mapped conditioning label, None = default.
        activation="lrelu",
        mbstd_group_size=4,  # Group size for the minibatch standard deviation layer, None = entire minibatch.
        mbstd_num_channels=1,  # Number of features for the minibatch standard deviation layer, 0 = disable.
    ):
        super().__init__()
        self.c_dim = c_dim
        self.img_resolution = img_resolution
        self.img_channels = img_channels
        resolution_log2 = int(np.log2(img_resolution))
        assert img_resolution == 2 ** resolution_log2 and img_resolution >= 4
        self.resolution_log2 = resolution_log2
        if cmap_dim is None:
            cmap_dim = nf(2)
        if c_dim == 0:
            # Unconditional: no label projection at all.
            cmap_dim = 0
        self.cmap_dim = cmap_dim
        if c_dim > 0:
            self.mapping = MappingNet(
                z_dim=0, c_dim=c_dim, w_dim=cmap_dim, num_ws=None, w_avg_beta=None
            )
        # Full-resolution branch (input: mask channel + image).
        Dis = [DisFromRGB(img_channels + 1, nf(resolution_log2), activation)]
        for res in range(resolution_log2, 2, -1):
            Dis.append(DisBlock(nf(res), nf(res - 1), activation))
        if mbstd_num_channels > 0:
            Dis.append(
                MinibatchStdLayer(
                    group_size=mbstd_group_size, num_channels=mbstd_num_channels
                )
            )
        Dis.append(
            Conv2dLayer(
                nf(2) + mbstd_num_channels, nf(2), kernel_size=3, activation=activation
            )
        )
        self.Dis = nn.Sequential(*Dis)
        self.fc0 = FullyConnectedLayer(nf(2) * 4 ** 2, nf(2), activation=activation)
        self.fc1 = FullyConnectedLayer(nf(2), 1 if cmap_dim == 0 else cmap_dim)
        # Stage-1 branch with half the channel width (for 64x64 outputs).
        Dis_stg1 = [DisFromRGB(img_channels + 1, nf(resolution_log2) // 2, activation)]
        for res in range(resolution_log2, 2, -1):
            Dis_stg1.append(DisBlock(nf(res) // 2, nf(res - 1) // 2, activation))
        if mbstd_num_channels > 0:
            Dis_stg1.append(
                MinibatchStdLayer(
                    group_size=mbstd_group_size, num_channels=mbstd_num_channels
                )
            )
        Dis_stg1.append(
            Conv2dLayer(
                nf(2) // 2 + mbstd_num_channels,
                nf(2) // 2,
                kernel_size=3,
                activation=activation,
            )
        )
        self.Dis_stg1 = nn.Sequential(*Dis_stg1)
        self.fc0_stg1 = FullyConnectedLayer(
            nf(2) // 2 * 4 ** 2, nf(2) // 2, activation=activation
        )
        self.fc1_stg1 = FullyConnectedLayer(
            nf(2) // 2, 1 if cmap_dim == 0 else cmap_dim
        )

    def forward(self, images_in, masks_in, images_stg1, c):
        """Score the final image and the stage-1 image; returns ``(x, x_stg1)``."""
        x = self.Dis(torch.cat([masks_in - 0.5, images_in], dim=1))
        x = self.fc1(self.fc0(x.flatten(start_dim=1)))
        x_stg1 = self.Dis_stg1(torch.cat([masks_in - 0.5, images_stg1], dim=1))
        x_stg1 = self.fc1_stg1(self.fc0_stg1(x_stg1.flatten(start_dim=1)))
        if self.c_dim > 0:
            cmap = self.mapping(None, c)
        if self.cmap_dim > 0:
            # Projection discriminator: dot product with the label embedding.
            x = (x * cmap).sum(dim=1, keepdim=True) * (1 / np.sqrt(self.cmap_dim))
            x_stg1 = (x_stg1 * cmap).sum(dim=1, keepdim=True) * (
                1 / np.sqrt(self.cmap_dim)
            )
        return x, x_stg1
# Pretrained MAT generator weights (Places 512); both values can be
# overridden via environment variables of the same name.
MAT_MODEL_URL = os.environ.get(
    "MAT_MODEL_URL",
    "https://github.com/Sanster/models/releases/download/add_mat/Places_512_FullData_G.pth",
)
MAT_MODEL_MD5 = os.environ.get("MAT_MODEL_MD5", "8ca927835fa3f5e21d65ffcb165377ed")
class MAT(InpaintModel):
    """Mask-Aware Transformer inpainting model wrapper (512x512 generator)."""

    name = "mat"
    min_size = 512
    pad_mod = 512
    pad_to_square = True

    def init_model(self, device, **kwargs):
        """Build the generator, load pretrained weights and fixed latents."""
        seed = 240  # pick up a random number
        set_seed(seed)
        fp16 = not kwargs.get("no_half", False)
        use_gpu = "cuda" in str(device) and torch.cuda.is_available()
        # Half precision only when on CUDA and not explicitly disabled.
        self.torch_dtype = torch.float16 if use_gpu and fp16 else torch.float32
        G = Generator(
            z_dim=512,
            c_dim=0,
            w_dim=512,
            img_resolution=512,
            img_channels=3,
            mapping_kwargs={"torch_dtype": self.torch_dtype},
        ).to(self.torch_dtype)
        # fmt: off
        # Fixed latent z and (empty) label reused for every request so
        # results are deterministic across calls.
        self.model = load_model(G, MAT_MODEL_URL, device, MAT_MODEL_MD5)
        self.z = torch.from_numpy(np.random.randn(1, G.z_dim)).to(self.torch_dtype).to(device)
        self.label = torch.zeros([1, self.model.c_dim], device=device).to(self.torch_dtype)
        # fmt: on

    @staticmethod
    def is_downloaded() -> bool:
        """Return True when the pretrained weight file is already cached."""
        return os.path.exists(get_cache_path_by_url(MAT_MODEL_URL))

    def forward(self, image, mask, config: Config):
        """Input images and output images have same size
        images: [H, W, C] RGB
        masks: [H, W] mask area == 255
        return: BGR IMAGE
        """
        image = norm_img(image)  # [0, 1]
        image = image * 2 - 1  # [0, 1] -> [-1, 1]
        mask = (mask > 127) * 255
        # Invert: the generator keeps pixels where mask == 1 (known region).
        mask = 255 - mask
        mask = norm_img(mask)
        image = (
            torch.from_numpy(image).unsqueeze(0).to(self.torch_dtype).to(self.device)
        )
        mask = torch.from_numpy(mask).unsqueeze(0).to(self.torch_dtype).to(self.device)
        output = self.model(
            image, mask, self.z, self.label, truncation_psi=1, noise_mode="none"
        )
        # [-1, 1] -> [0, 255] uint8, NCHW -> NHWC
        output = (
            (output.permute(0, 2, 3, 1) * 127.5 + 127.5)
            .round()
            .clamp(0, 255)
            .to(torch.uint8)
        )
        output = output[0].cpu().numpy()
        cur_res = cv2.cvtColor(output, cv2.COLOR_RGB2BGR)
        return cur_res
| 62,603 | 31.336777 | 110 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/model/manga.py | import os
import random
import cv2
import numpy as np
import torch
import time
from loguru import logger
from lama_cleaner.helper import get_cache_path_by_url, load_jit_model
from lama_cleaner.model.base import InpaintModel
from lama_cleaner.schema import Config
# TorchScript weight locations for the manga inpaintor and the Erika
# line-extraction model; all overridable via environment variables.
MANGA_INPAINTOR_MODEL_URL = os.environ.get(
    "MANGA_INPAINTOR_MODEL_URL",
    "https://github.com/Sanster/models/releases/download/manga/manga_inpaintor.jit",
)
MANGA_INPAINTOR_MODEL_MD5 = os.environ.get(
    "MANGA_INPAINTOR_MODEL_MD5", "7d8b269c4613b6b3768af714610da86c"
)
MANGA_LINE_MODEL_URL = os.environ.get(
    "MANGA_LINE_MODEL_URL",
    "https://github.com/Sanster/models/releases/download/manga/erika.jit",
)
MANGA_LINE_MODEL_MD5 = os.environ.get(
    "MANGA_LINE_MODEL_MD5", "0c926d5a4af8450b0d00bc5b9a095644"
)
class Manga(InpaintModel):
    """Manga inpainting model: a line-extraction network (Erika) feeding a
    grayscale TorchScript inpaintor."""

    name = "manga"
    pad_mod = 16

    def init_model(self, device, **kwargs):
        """Load both TorchScript models and fix the RNG seed used per request."""
        self.inpaintor_model = load_jit_model(
            MANGA_INPAINTOR_MODEL_URL, device, MANGA_INPAINTOR_MODEL_MD5
        )
        self.line_model = load_jit_model(
            MANGA_LINE_MODEL_URL, device, MANGA_LINE_MODEL_MD5
        )
        self.seed = 42

    @staticmethod
    def is_downloaded() -> bool:
        """Return True when both weight files are already cached."""
        model_paths = [
            get_cache_path_by_url(MANGA_INPAINTOR_MODEL_URL),
            get_cache_path_by_url(MANGA_LINE_MODEL_URL),
        ]
        return all([os.path.exists(it) for it in model_paths])

    def forward(self, image, mask, config: Config):
        """
        image: [H, W, C] RGB
        mask: [H, W, 1]
        return: BGR IMAGE
        """
        # Re-seed every call so the noise input (and thus the output) is
        # deterministic across requests.
        seed = self.seed
        random.seed(seed)
        np.random.seed(seed)
        torch.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
        gray_img = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
        gray_img = torch.from_numpy(
            gray_img[np.newaxis, np.newaxis, :, :].astype(np.float32)
        ).to(self.device)
        start = time.time()
        lines = self.line_model(gray_img)
        torch.cuda.empty_cache()
        lines = torch.clamp(lines, 0, 255)
        logger.info(f"erika_model time: {time.time() - start}")
        mask = torch.from_numpy(mask[np.newaxis, :, :, :]).to(self.device)
        mask = mask.permute(0, 3, 1, 2)
        mask = torch.where(mask > 0.5, 1.0, 0.0)
        noise = torch.randn_like(mask)
        ones = torch.ones_like(mask)
        # Normalize both inputs from [0, 255] to [-1, 1].
        gray_img = gray_img / 255 * 2 - 1.0
        lines = lines / 255 * 2 - 1.0
        start = time.time()
        inpainted_image = self.inpaintor_model(gray_img, lines, mask, noise, ones)
        logger.info(f"image_inpaintor_model time: {time.time() - start}")
        # [-1, 1] -> [0, 255] uint8 grayscale, then expand to 3-channel BGR.
        cur_res = inpainted_image[0].permute(1, 2, 0).detach().cpu().numpy()
        cur_res = (cur_res * 127.5 + 127.5).astype(np.uint8)
        cur_res = cv2.cvtColor(cur_res, cv2.COLOR_GRAY2BGR)
        return cur_res
| 2,884 | 30.358696 | 84 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/model/ldm.py | import os
import numpy as np
import torch
from loguru import logger
from lama_cleaner.model.base import InpaintModel
from lama_cleaner.model.ddim_sampler import DDIMSampler
from lama_cleaner.model.plms_sampler import PLMSSampler
from lama_cleaner.schema import Config, LDMSampler
torch.manual_seed(42)
import torch.nn as nn
from lama_cleaner.helper import (
download_model,
norm_img,
get_cache_path_by_url,
load_jit_model,
)
from lama_cleaner.model.utils import (
make_beta_schedule,
timestep_embedding,
)
# TorchScript weight locations for the LDM inpainting pipeline (VAE encoder /
# decoder and the diffusion UNet); all overridable via environment variables.
LDM_ENCODE_MODEL_URL = os.environ.get(
    "LDM_ENCODE_MODEL_URL",
    "https://github.com/Sanster/models/releases/download/add_ldm/cond_stage_model_encode.pt",
)
LDM_ENCODE_MODEL_MD5 = os.environ.get(
    "LDM_ENCODE_MODEL_MD5", "23239fc9081956a3e70de56472b3f296"
)
LDM_DECODE_MODEL_URL = os.environ.get(
    "LDM_DECODE_MODEL_URL",
    "https://github.com/Sanster/models/releases/download/add_ldm/cond_stage_model_decode.pt",
)
LDM_DECODE_MODEL_MD5 = os.environ.get(
    "LDM_DECODE_MODEL_MD5", "fe419cd15a750d37a4733589d0d3585c"
)
LDM_DIFFUSION_MODEL_URL = os.environ.get(
    "LDM_DIFFUSION_MODEL_URL",
    "https://github.com/Sanster/models/releases/download/add_ldm/diffusion.pt",
)
LDM_DIFFUSION_MODEL_MD5 = os.environ.get(
    "LDM_DIFFUSION_MODEL_MD5", "b0afda12bf790c03aba2a7431f11d22d"
)
class DDPM(nn.Module):
    """Classic DDPM with Gaussian diffusion, in image space.

    Only the schedule bookkeeping from the upstream CompVis implementation is
    kept: the constructor pre-computes every per-timestep diffusion
    coefficient and registers them as float32 buffers on ``device``. The
    denoising network itself lives in subclasses (see ``LatentDiffusion``).
    """

    def __init__(
        self,
        device,
        timesteps=1000,
        beta_schedule="linear",
        linear_start=0.0015,
        linear_end=0.0205,
        cosine_s=0.008,
        original_elbo_weight=0.0,
        v_posterior=0.0,  # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta
        l_simple_weight=1.0,
        parameterization="eps",  # all assuming fixed variance schedules
        use_positional_encodings=False,
    ):
        super().__init__()
        self.device = device
        self.parameterization = parameterization
        self.use_positional_encodings = use_positional_encodings
        self.v_posterior = v_posterior
        self.original_elbo_weight = original_elbo_weight
        self.l_simple_weight = l_simple_weight
        self.register_schedule(
            beta_schedule=beta_schedule,
            timesteps=timesteps,
            linear_start=linear_start,
            linear_end=linear_end,
            cosine_s=cosine_s,
        )

    def register_schedule(
        self,
        given_betas=None,
        beta_schedule="linear",
        timesteps=1000,
        linear_start=1e-4,
        linear_end=2e-2,
        cosine_s=8e-3,
    ):
        """Pre-compute and register all per-timestep diffusion coefficients.

        Args:
            given_betas: unused here; kept for signature compatibility with
                the upstream implementation (betas always come from
                ``make_beta_schedule``).
            beta_schedule: schedule name forwarded to ``make_beta_schedule``.
            timesteps: number of diffusion steps.
            linear_start / linear_end / cosine_s: schedule shape parameters.
        """
        betas = make_beta_schedule(
            self.device,
            beta_schedule,
            timesteps,
            linear_start=linear_start,
            linear_end=linear_end,
            cosine_s=cosine_s,
        )
        alphas = 1.0 - betas
        alphas_cumprod = np.cumprod(alphas, axis=0)
        # alpha_bar_{t-1}, with alpha_bar_{-1} defined as 1.
        alphas_cumprod_prev = np.append(1.0, alphas_cumprod[:-1])
        (timesteps,) = betas.shape
        self.num_timesteps = int(timesteps)
        self.linear_start = linear_start
        self.linear_end = linear_end
        assert (
            alphas_cumprod.shape[0] == self.num_timesteps
        ), "alphas have to be defined for each timestep"
        to_torch = lambda x: torch.tensor(x, dtype=torch.float32).to(self.device)
        self.register_buffer("betas", to_torch(betas))
        self.register_buffer("alphas_cumprod", to_torch(alphas_cumprod))
        self.register_buffer("alphas_cumprod_prev", to_torch(alphas_cumprod_prev))
        # calculations for diffusion q(x_t | x_{t-1}) and others
        self.register_buffer("sqrt_alphas_cumprod", to_torch(np.sqrt(alphas_cumprod)))
        self.register_buffer(
            "sqrt_one_minus_alphas_cumprod", to_torch(np.sqrt(1.0 - alphas_cumprod))
        )
        self.register_buffer(
            "log_one_minus_alphas_cumprod", to_torch(np.log(1.0 - alphas_cumprod))
        )
        self.register_buffer(
            "sqrt_recip_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod))
        )
        self.register_buffer(
            "sqrt_recipm1_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod - 1))
        )
        # calculations for posterior q(x_{t-1} | x_t, x_0)
        posterior_variance = (1 - self.v_posterior) * betas * (
            1.0 - alphas_cumprod_prev
        ) / (1.0 - alphas_cumprod) + self.v_posterior * betas
        # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t)
        self.register_buffer("posterior_variance", to_torch(posterior_variance))
        # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain
        self.register_buffer(
            "posterior_log_variance_clipped",
            to_torch(np.log(np.maximum(posterior_variance, 1e-20))),
        )
        self.register_buffer(
            "posterior_mean_coef1",
            to_torch(betas * np.sqrt(alphas_cumprod_prev) / (1.0 - alphas_cumprod)),
        )
        self.register_buffer(
            "posterior_mean_coef2",
            to_torch(
                (1.0 - alphas_cumprod_prev) * np.sqrt(alphas) / (1.0 - alphas_cumprod)
            ),
        )
        if self.parameterization == "eps":
            lvlb_weights = self.betas**2 / (
                2
                * self.posterior_variance
                * to_torch(alphas)
                * (1 - self.alphas_cumprod)
            )
        elif self.parameterization == "x0":
            # NOTE(review): this branch mixes numpy ufuncs with torch tensors,
            # exactly as the upstream code does; it is unused by this package
            # (parameterization is always "eps") -- confirm before relying on it.
            lvlb_weights = (
                0.5
                * np.sqrt(torch.Tensor(alphas_cumprod))
                / (2.0 * 1 - torch.Tensor(alphas_cumprod))
            )
        else:
            raise NotImplementedError("mu not supported")
        # TODO how to choose this term
        lvlb_weights[0] = lvlb_weights[1]
        self.register_buffer("lvlb_weights", lvlb_weights, persistent=False)
        # Fix: upstream asserted `not torch.isnan(...).all()`, which only fires
        # when *every* weight is NaN; the intent is that *no* weight is NaN.
        assert not torch.isnan(self.lvlb_weights).any()
class LatentDiffusion(DDPM):
    """DDPM operating in the latent space of a pre-trained autoencoder.

    Wraps a TorchScript ``diffusion_model`` (UNet) and keeps the conditioning
    bookkeeping of the upstream CompVis implementation.
    """

    def __init__(
        self,
        diffusion_model,
        device,
        cond_stage_key="image",
        cond_stage_trainable=False,
        concat_mode=True,
        scale_factor=1.0,
        scale_by_std=False,
        *args,
        **kwargs,
    ):
        # These two attributes must be set *before* DDPM.__init__ runs,
        # because it invokes the overridden register_schedule(), which reads
        # self.num_timesteps_cond.
        self.num_timesteps_cond = 1
        self.scale_by_std = scale_by_std
        super().__init__(device, *args, **kwargs)
        self.diffusion_model = diffusion_model
        self.cond_stage_key = cond_stage_key
        self.cond_stage_trainable = cond_stage_trainable
        self.concat_mode = concat_mode
        self.scale_factor = scale_factor
        self.num_downs = 2

    def make_cond_schedule(self):
        """Map every diffusion step onto one of num_timesteps_cond steps."""
        last_step = self.num_timesteps - 1
        cond_ids = torch.full((self.num_timesteps,), last_step, dtype=torch.long)
        spaced = torch.linspace(0, last_step, self.num_timesteps_cond).round().long()
        cond_ids[: self.num_timesteps_cond] = spaced
        self.cond_ids = cond_ids

    def register_schedule(
        self,
        given_betas=None,
        beta_schedule="linear",
        timesteps=1000,
        linear_start=1e-4,
        linear_end=2e-2,
        cosine_s=8e-3,
    ):
        """Register the DDPM schedule, then the cond schedule if shortened."""
        super().register_schedule(
            given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s
        )
        self.shorten_cond_schedule = self.num_timesteps_cond > 1
        if self.shorten_cond_schedule:
            self.make_cond_schedule()

    def apply_model(self, x_noisy, t, cond):
        """Run the scripted UNet on a noisy latent at timestep(s) ``t``."""
        emb = timestep_embedding(x_noisy.device, t, 256, repeat_only=False)
        return self.diffusion_model(x_noisy, emb, cond)
class LDM(InpaintModel):
    """Latent-diffusion inpainting model backed by three TorchScript parts:
    an encoder, a decoder and the diffusion UNet (see the LDM_*_URL constants).
    """
    name = "ldm"
    pad_mod = 32  # inputs are padded so H/W are multiples of 32
    def __init__(self, device, fp16: bool = True, **kwargs):
        # fp16 must be stored before super().__init__, which calls init_model().
        self.fp16 = fp16
        super().__init__(device)
        self.device = device
    def init_model(self, device, **kwargs):
        """Download (if necessary) and load the three scripted sub-models."""
        self.diffusion_model = load_jit_model(
            LDM_DIFFUSION_MODEL_URL, device, LDM_DIFFUSION_MODEL_MD5
        )
        self.cond_stage_model_decode = load_jit_model(
            LDM_DECODE_MODEL_URL, device, LDM_DECODE_MODEL_MD5
        )
        self.cond_stage_model_encode = load_jit_model(
            LDM_ENCODE_MODEL_URL, device, LDM_ENCODE_MODEL_MD5
        )
        # Half precision is only applied on CUDA devices.
        if self.fp16 and "cuda" in str(device):
            self.diffusion_model = self.diffusion_model.half()
            self.cond_stage_model_decode = self.cond_stage_model_decode.half()
            self.cond_stage_model_encode = self.cond_stage_model_encode.half()
        self.model = LatentDiffusion(self.diffusion_model, device)
    @staticmethod
    def is_downloaded() -> bool:
        """Return True when all three model files are present in the cache."""
        model_paths = [
            get_cache_path_by_url(LDM_DIFFUSION_MODEL_URL),
            get_cache_path_by_url(LDM_DECODE_MODEL_URL),
            get_cache_path_by_url(LDM_ENCODE_MODEL_URL),
        ]
        return all([os.path.exists(it) for it in model_paths])
    @torch.cuda.amp.autocast()
    def forward(self, image, mask, config: Config):
        """
        image: [H, W, C] RGB
        mask: [H, W, 1]
        return: BGR IMAGE
        """
        # image [1,3,512,512] float32
        # mask: [1,1,512,512] float32
        # masked_image: [1,3,512,512] float32
        if config.ldm_sampler == LDMSampler.ddim:
            sampler = DDIMSampler(self.model)
        elif config.ldm_sampler == LDMSampler.plms:
            sampler = PLMSSampler(self.model)
        else:
            raise ValueError()
        steps = config.ldm_steps
        image = norm_img(image)
        mask = norm_img(mask)
        # Binarize the mask before conditioning.
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        image = torch.from_numpy(image).unsqueeze(0).to(self.device)
        mask = torch.from_numpy(mask).unsqueeze(0).to(self.device)
        masked_image = (1 - mask) * image
        # Rescale [0, 1] -> [-1, 1] for the encoder.
        mask = self._norm(mask)
        masked_image = self._norm(masked_image)
        c = self.cond_stage_model_encode(masked_image)
        torch.cuda.empty_cache()
        cc = torch.nn.functional.interpolate(mask, size=c.shape[-2:]) # 1,1,128,128
        c = torch.cat((c, cc), dim=1) # 1,4,128,128
        # Latent shape excludes the appended mask channel.
        shape = (c.shape[1] - 1,) + c.shape[2:]
        samples_ddim = sampler.sample(
            steps=steps, conditioning=c, batch_size=c.shape[0], shape=shape
        )
        torch.cuda.empty_cache()
        x_samples_ddim = self.cond_stage_model_decode(
            samples_ddim
        ) # samples_ddim: 1, 3, 128, 128 float32
        torch.cuda.empty_cache()
        # image = torch.clamp((image + 1.0) / 2.0, min=0.0, max=1.0)
        # mask = torch.clamp((mask + 1.0) / 2.0, min=0.0, max=1.0)
        inpainted_image = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)
        # inpainted = (1 - mask) * image + mask * predicted_image
        # NCHW -> HWC, [0,1] -> [0,255], RGB -> BGR.
        inpainted_image = inpainted_image.cpu().numpy().transpose(0, 2, 3, 1)[0] * 255
        inpainted_image = inpainted_image.astype(np.uint8)[:, :, ::-1]
        return inpainted_image
    def _norm(self, tensor):
        """Rescale a [0, 1] tensor to [-1, 1]."""
        return tensor * 2.0 - 1.0
| 11,275 | 33.169697 | 116 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/model/__init__.py | 0 | 0 | 0 | py | |
lama-cleaner | lama-cleaner-main/lama_cleaner/model/paint_by_example.py | import PIL
import PIL.Image
import cv2
import torch
from diffusers import DiffusionPipeline
from loguru import logger
from lama_cleaner.model.base import DiffusionInpaintModel
from lama_cleaner.model.utils import set_seed
from lama_cleaner.schema import Config
class PaintByExample(DiffusionInpaintModel):
    """Diffusers Paint-by-Example inpainting: repaints the masked region so
    that it resembles a user-supplied example image."""

    name = "paint_by_example"
    pad_mod = 8
    min_size = 512

    def init_model(self, device: torch.device, **kwargs):
        """Build the diffusers pipeline, honouring half/offload/NSFW options."""
        half_wanted = not kwargs.get('no_half', False)
        on_gpu = device == torch.device('cuda') and torch.cuda.is_available()
        dtype = torch.float16 if on_gpu and half_wanted else torch.float32

        pipeline_kwargs = {"local_files_only": kwargs.get('local_files_only', False)}
        if kwargs['disable_nsfw'] or kwargs.get('cpu_offload', False):
            logger.info("Disable Paint By Example Model NSFW checker")
            pipeline_kwargs["safety_checker"] = None
            pipeline_kwargs["requires_safety_checker"] = False

        self.model = DiffusionPipeline.from_pretrained(
            "Fantasy-Studio/Paint-by-Example",
            torch_dtype=dtype,
            **pipeline_kwargs,
        )
        self.model.enable_attention_slicing()
        if kwargs.get('enable_xformers', False):
            self.model.enable_xformers_memory_efficient_attention()

        # TODO: gpu_id
        if kwargs.get('cpu_offload', False) and on_gpu:
            self.model.image_encoder = self.model.image_encoder.to(device)
            self.model.enable_sequential_cpu_offload(gpu_id=0)
        else:
            self.model = self.model.to(device)

    def forward(self, image, mask, config: Config):
        """Input image and output image have same size
        image: [H, W, C] RGB
        mask: [H, W, 1] 255 means area to repaint
        return: BGR IMAGE
        """
        generator = torch.manual_seed(config.paint_by_example_seed)
        result = self.model(
            image=PIL.Image.fromarray(image),
            mask_image=PIL.Image.fromarray(mask[:, :, -1], mode="L"),
            example_image=config.paint_by_example_example_image,
            num_inference_steps=config.paint_by_example_steps,
            output_type='np.array',
            generator=generator,
        ).images[0]
        rgb = (result * 255).round().astype("uint8")
        return cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR)

    def forward_post_process(self, result, image, mask, config):
        """Optionally histogram-match the repaint and feather the mask edge."""
        if config.paint_by_example_match_histograms:
            result = self._match_histograms(result, image[:, :, ::-1], mask)

        blur_radius = config.paint_by_example_mask_blur
        if blur_radius != 0:
            kernel = 2 * blur_radius + 1
            mask = cv2.GaussianBlur(mask, (kernel, kernel), 0)
        return result, image, mask

    @staticmethod
    def is_downloaded() -> bool:
        # model will be downloaded when app start, and can't switch in frontend settings
        return True
| 2,934 | 35.6875 | 88 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/model/plms_sampler.py | # From: https://github.com/CompVis/latent-diffusion/blob/main/ldm/models/diffusion/plms.py
import torch
import numpy as np
from lama_cleaner.model.utils import make_ddim_timesteps, make_ddim_sampling_parameters, noise_like
from tqdm import tqdm
class PLMSSampler(object):
    """Pseudo Linear Multistep (PLMS) sampler from CompVis latent-diffusion.

    Wraps a model exposing ``apply_model``/``alphas_cumprod`` etc. and runs
    the PLMS variant of DDIM sampling (requires ddim_eta == 0).
    """
    def __init__(self, model, schedule="linear", **kwargs):
        super().__init__()
        self.model = model
        self.ddpm_num_timesteps = model.num_timesteps
        self.schedule = schedule
    def register_buffer(self, name, attr):
        # Plain attribute set; named after nn.Module.register_buffer for symmetry.
        setattr(self, name, attr)
    def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True):
        """Pre-compute the DDIM timestep subsequence and its alpha/sigma tables."""
        if ddim_eta != 0:
            raise ValueError('ddim_eta must be 0 for PLMS')
        self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,
                                                  num_ddpm_timesteps=self.ddpm_num_timesteps, verbose=verbose)
        alphas_cumprod = self.model.alphas_cumprod
        assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'
        to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)
        self.register_buffer('betas', to_torch(self.model.betas))
        self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
        self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))
        # calculations for diffusion q(x_t | x_{t-1}) and others
        self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))
        self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))
        self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))
        self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))
        self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))
        # ddim sampling parameters
        ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),
                                                                                   ddim_timesteps=self.ddim_timesteps,
                                                                                   eta=ddim_eta, verbose=verbose)
        self.register_buffer('ddim_sigmas', ddim_sigmas)
        self.register_buffer('ddim_alphas', ddim_alphas)
        self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)
        self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))
        sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(
            (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (
                    1 - self.alphas_cumprod / self.alphas_cumprod_prev))
        self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)
    @torch.no_grad()
    def sample(self,
               steps,
               batch_size,
               shape,
               conditioning=None,
               callback=None,
               normals_sequence=None,
               img_callback=None,
               quantize_x0=False,
               eta=0.,
               mask=None,
               x0=None,
               temperature=1.,
               noise_dropout=0.,
               score_corrector=None,
               corrector_kwargs=None,
               verbose=False,
               x_T=None,
               log_every_t=100,
               unconditional_guidance_scale=1.,
               unconditional_conditioning=None,
               # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...
               **kwargs
               ):
        """Entry point: build the schedule and run plms_sampling; returns latents
        of shape (batch_size, C, H, W)."""
        if conditioning is not None:
            if isinstance(conditioning, dict):
                cbs = conditioning[list(conditioning.keys())[0]].shape[0]
                if cbs != batch_size:
                    print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}")
            else:
                if conditioning.shape[0] != batch_size:
                    print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}")
        self.make_schedule(ddim_num_steps=steps, ddim_eta=eta, verbose=verbose)
        # sampling
        C, H, W = shape
        size = (batch_size, C, H, W)
        print(f'Data shape for PLMS sampling is {size}')
        samples = self.plms_sampling(conditioning, size,
                                     callback=callback,
                                     img_callback=img_callback,
                                     quantize_denoised=quantize_x0,
                                     mask=mask, x0=x0,
                                     ddim_use_original_steps=False,
                                     noise_dropout=noise_dropout,
                                     temperature=temperature,
                                     score_corrector=score_corrector,
                                     corrector_kwargs=corrector_kwargs,
                                     x_T=x_T,
                                     log_every_t=log_every_t,
                                     unconditional_guidance_scale=unconditional_guidance_scale,
                                     unconditional_conditioning=unconditional_conditioning,
                                     )
        return samples
    @torch.no_grad()
    def plms_sampling(self, cond, shape,
                      x_T=None, ddim_use_original_steps=False,
                      callback=None, timesteps=None, quantize_denoised=False,
                      mask=None, x0=None, img_callback=None, log_every_t=100,
                      temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
                      unconditional_guidance_scale=1., unconditional_conditioning=None, ):
        """Iterate the PLMS steps from pure noise (or ``x_T``) back to x_0.

        Keeps a sliding window ``old_eps`` of up to 3 previous noise
        predictions for the linear-multistep update in p_sample_plms.
        """
        device = self.model.betas.device
        b = shape[0]
        if x_T is None:
            img = torch.randn(shape, device=device)
        else:
            img = x_T
        if timesteps is None:
            timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps
        elif timesteps is not None and not ddim_use_original_steps:
            subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1
            timesteps = self.ddim_timesteps[:subset_end]
        time_range = list(reversed(range(0, timesteps))) if ddim_use_original_steps else np.flip(timesteps)
        total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]
        print(f"Running PLMS Sampling with {total_steps} timesteps")
        iterator = tqdm(time_range, desc='PLMS Sampler', total=total_steps)
        old_eps = []
        for i, step in enumerate(iterator):
            index = total_steps - i - 1
            ts = torch.full((b,), step, device=device, dtype=torch.long)
            ts_next = torch.full((b,), time_range[min(i + 1, len(time_range) - 1)], device=device, dtype=torch.long)
            if mask is not None:
                assert x0 is not None
                img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?
                img = img_orig * mask + (1. - mask) * img
            outs = self.p_sample_plms(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,
                                      quantize_denoised=quantize_denoised, temperature=temperature,
                                      noise_dropout=noise_dropout, score_corrector=score_corrector,
                                      corrector_kwargs=corrector_kwargs,
                                      unconditional_guidance_scale=unconditional_guidance_scale,
                                      unconditional_conditioning=unconditional_conditioning,
                                      old_eps=old_eps, t_next=ts_next)
            img, pred_x0, e_t = outs
            old_eps.append(e_t)
            if len(old_eps) >= 4:
                old_eps.pop(0)
            if callback: callback(i)
            if img_callback: img_callback(pred_x0, i)
        return img
    @torch.no_grad()
    def p_sample_plms(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,
                      temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
                      unconditional_guidance_scale=1., unconditional_conditioning=None, old_eps=None, t_next=None):
        """Single PLMS update. The multistep order grows with len(old_eps):
        Euler+correction (0), then 2nd/3rd/4th-order Adams-Bashforth."""
        b, *_, device = *x.shape, x.device
        def get_model_output(x, t):
            # Classifier-free guidance: batch cond/uncond in one forward pass.
            if unconditional_conditioning is None or unconditional_guidance_scale == 1.:
                e_t = self.model.apply_model(x, t, c)
            else:
                x_in = torch.cat([x] * 2)
                t_in = torch.cat([t] * 2)
                c_in = torch.cat([unconditional_conditioning, c])
                e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)
                e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)
            if score_corrector is not None:
                assert self.model.parameterization == "eps"
                e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)
            return e_t
        alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas
        alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev
        sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas
        sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas
        def get_x_prev_and_pred_x0(e_t, index):
            # select parameters corresponding to the currently considered timestep
            a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)
            a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)
            sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)
            sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device)
            # current prediction for x_0
            pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()
            if quantize_denoised:
                pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)
            # direction pointing to x_t
            dir_xt = (1. - a_prev - sigma_t ** 2).sqrt() * e_t
            noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature
            if noise_dropout > 0.:
                noise = torch.nn.functional.dropout(noise, p=noise_dropout)
            x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise
            return x_prev, pred_x0
        e_t = get_model_output(x, t)
        if len(old_eps) == 0:
            # Pseudo Improved Euler (2nd order)
            x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t, index)
            e_t_next = get_model_output(x_prev, t_next)
            e_t_prime = (e_t + e_t_next) / 2
        elif len(old_eps) == 1:
            # 2nd order Pseudo Linear Multistep (Adams-Bashforth)
            e_t_prime = (3 * e_t - old_eps[-1]) / 2
        elif len(old_eps) == 2:
            # 3nd order Pseudo Linear Multistep (Adams-Bashforth)
            e_t_prime = (23 * e_t - 16 * old_eps[-1] + 5 * old_eps[-2]) / 12
        elif len(old_eps) >= 3:
            # 4nd order Pseudo Linear Multistep (Adams-Bashforth)
            e_t_prime = (55 * e_t - 59 * old_eps[-1] + 37 * old_eps[-2] - 9 * old_eps[-3]) / 24
        x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t_prime, index)
        return x_prev, pred_x0, e_t
| 11,851 | 51.442478 | 131 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/model/fcf.py | import os
import random
import cv2
import torch
import numpy as np
import torch.fft as fft
from lama_cleaner.schema import Config
from lama_cleaner.helper import (
load_model,
get_cache_path_by_url,
norm_img,
boxes_from_mask,
resize_max_size,
)
from lama_cleaner.model.base import InpaintModel
from torch import conv2d, nn
import torch.nn.functional as F
from lama_cleaner.model.utils import (
setup_filter,
_parse_scaling,
_parse_padding,
Conv2dLayer,
FullyConnectedLayer,
MinibatchStdLayer,
activation_funcs,
conv2d_resample,
bias_act,
upsample2d,
normalize_2nd_moment,
downsample2d,
)
def upfirdn2d(x, f, up=1, down=1, padding=0, flip_filter=False, gain=1, impl="cuda"):
    """Upsample, FIR-filter and downsample a batch of 2D images.

    Thin dispatcher kept for API compatibility with the StyleGAN codebase:
    ``impl`` is accepted but ignored, and every call is routed to the slow
    PyTorch reference implementation.
    """
    assert isinstance(x, torch.Tensor)
    return _upfirdn2d_ref(
        x,
        f,
        up=up,
        down=down,
        padding=padding,
        flip_filter=flip_filter,
        gain=gain,
    )
def _upfirdn2d_ref(x, f, up=1, down=1, padding=0, flip_filter=False, gain=1):
    """Slow reference implementation of `upfirdn2d()` using standard PyTorch ops.

    Pipeline: zero-insertion upsample -> pad/crop -> FIR filter -> subsample.
    ``f`` may be a 1D (separable) or 2D float32 filter; ``None`` means identity.
    ``up``, ``down`` and ``padding`` may be scalars or per-axis sequences
    (parsed by the project helpers ``_parse_scaling`` / ``_parse_padding``).
    """
    # Validate arguments.
    assert isinstance(x, torch.Tensor) and x.ndim == 4
    if f is None:
        f = torch.ones([1, 1], dtype=torch.float32, device=x.device)
    assert isinstance(f, torch.Tensor) and f.ndim in [1, 2]
    assert f.dtype == torch.float32 and not f.requires_grad
    batch_size, num_channels, in_height, in_width = x.shape
    upx, upy = _parse_scaling(up)
    downx, downy = _parse_scaling(down)
    padx0, padx1, pady0, pady1 = _parse_padding(padding)
    # Upsample by inserting zeros.
    x = x.reshape([batch_size, num_channels, in_height, 1, in_width, 1])
    x = torch.nn.functional.pad(x, [0, upx - 1, 0, 0, 0, upy - 1])
    x = x.reshape([batch_size, num_channels, in_height * upy, in_width * upx])
    # Pad or crop. Positive padding pads with zeros ...
    x = torch.nn.functional.pad(
        x, [max(padx0, 0), max(padx1, 0), max(pady0, 0), max(pady1, 0)]
    )
    # ... negative padding crops instead.
    x = x[
        :,
        :,
        max(-pady0, 0) : x.shape[2] - max(-pady1, 0),
        max(-padx0, 0) : x.shape[3] - max(-padx1, 0),
    ]
    # Setup filter. Fold the overall gain into the filter; the exponent keeps
    # the scaling consistent between separable (1D) and full (2D) filters.
    f = f * (gain ** (f.ndim / 2))
    f = f.to(x.dtype)
    if not flip_filter:
        f = f.flip(list(range(f.ndim)))
    # Convolve with the filter, one depthwise group per channel.
    f = f[np.newaxis, np.newaxis].repeat([num_channels, 1] + [1] * f.ndim)
    if f.ndim == 4:
        x = conv2d(input=x, weight=f, groups=num_channels)
    else:
        # Separable 1D filter: apply along one axis, then the other.
        x = conv2d(input=x, weight=f.unsqueeze(2), groups=num_channels)
        x = conv2d(input=x, weight=f.unsqueeze(3), groups=num_channels)
    # Downsample by throwing away pixels.
    x = x[:, :, ::downy, ::downx]
    return x
class EncoderEpilogue(torch.nn.Module):
    """Final (4x4) stage of the FcF encoder: minibatch-std + conv + FC head
    mapping the lowest-resolution feature map to the latent vector."""
    def __init__(
        self,
        in_channels, # Number of input channels.
        cmap_dim, # Dimensionality of mapped conditioning label, 0 = no label.
        z_dim, # Output Latent (Z) dimensionality.
        resolution, # Resolution of this block.
        img_channels, # Number of input color channels.
        architecture="resnet", # Architecture: 'orig', 'skip', 'resnet'.
        mbstd_group_size=4, # Group size for the minibatch standard deviation layer, None = entire minibatch.
        mbstd_num_channels=1, # Number of features for the minibatch standard deviation layer, 0 = disable.
        activation="lrelu", # Activation function: 'relu', 'lrelu', etc.
        conv_clamp=None, # Clamp the output of convolution layers to +-X, None = disable clamping.
    ):
        assert architecture in ["orig", "skip", "resnet"]
        super().__init__()
        self.in_channels = in_channels
        self.cmap_dim = cmap_dim
        self.resolution = resolution
        self.img_channels = img_channels
        self.architecture = architecture
        if architecture == "skip":
            self.fromrgb = Conv2dLayer(
                self.img_channels, in_channels, kernel_size=1, activation=activation
            )
        self.mbstd = (
            MinibatchStdLayer(
                group_size=mbstd_group_size, num_channels=mbstd_num_channels
            )
            if mbstd_num_channels > 0
            else None
        )
        self.conv = Conv2dLayer(
            in_channels + mbstd_num_channels,
            in_channels,
            kernel_size=3,
            activation=activation,
            conv_clamp=conv_clamp,
        )
        self.fc = FullyConnectedLayer(
            in_channels * (resolution**2), z_dim, activation=activation
        )
        self.dropout = torch.nn.Dropout(p=0.5)
    def forward(self, x, cmap, force_fp32=False):
        """Collapse the 4x4 feature map to the latent; returns (x, const_e)."""
        _ = force_fp32 # unused
        dtype = torch.float32
        memory_format = torch.contiguous_format
        # FromRGB.
        # NOTE(review): self.fromrgb (built for the "skip" architecture) is
        # never applied here -- only the dtype cast below; confirm upstream.
        x = x.to(dtype=dtype, memory_format=memory_format)
        # Main layers.
        if self.mbstd is not None:
            x = self.mbstd(x)
        const_e = self.conv(x)
        x = self.fc(const_e.flatten(1))
        x = self.dropout(x)
        # Conditioning: project onto the mapped label embedding.
        if self.cmap_dim > 0:
            x = (x * cmap).sum(dim=1, keepdim=True) * (1 / np.sqrt(self.cmap_dim))
        assert x.dtype == dtype
        return x, const_e
class EncoderBlock(torch.nn.Module):
    """One resolution stage of the FcF encoder: optional FromRGB, two convs
    (the second downsamples by 2), optional resnet skip path. Also returns
    the intermediate feature map for the decoder's skip connections."""
    def __init__(
        self,
        in_channels, # Number of input channels, 0 = first block.
        tmp_channels, # Number of intermediate channels.
        out_channels, # Number of output channels.
        resolution, # Resolution of this block.
        img_channels, # Number of input color channels.
        first_layer_idx, # Index of the first layer.
        architecture="skip", # Architecture: 'orig', 'skip', 'resnet'.
        activation="lrelu", # Activation function: 'relu', 'lrelu', etc.
        resample_filter=[
            1,
            3,
            3,
            1,
        ], # Low-pass filter to apply when resampling activations.
          # (Mutable default is safe here: it is only read, never mutated.)
        conv_clamp=None, # Clamp the output of convolution layers to +-X, None = disable clamping.
        use_fp16=False, # Use FP16 for this block?
        fp16_channels_last=False, # Use channels-last memory format with FP16?
        freeze_layers=0, # Freeze-D: Number of layers to freeze.
    ):
        assert in_channels in [0, tmp_channels]
        assert architecture in ["orig", "skip", "resnet"]
        super().__init__()
        self.in_channels = in_channels
        self.resolution = resolution
        # +1 extra input channel -- presumably the inpainting mask is
        # concatenated to the RGB input; TODO confirm against the caller.
        self.img_channels = img_channels + 1
        self.first_layer_idx = first_layer_idx
        self.architecture = architecture
        self.use_fp16 = use_fp16
        self.channels_last = use_fp16 and fp16_channels_last
        self.register_buffer("resample_filter", setup_filter(resample_filter))
        self.num_layers = 0
        # Yields True/False per layer depending on Freeze-D; also counts layers.
        def trainable_gen():
            while True:
                layer_idx = self.first_layer_idx + self.num_layers
                trainable = layer_idx >= freeze_layers
                self.num_layers += 1
                yield trainable
        trainable_iter = trainable_gen()
        if in_channels == 0:
            self.fromrgb = Conv2dLayer(
                self.img_channels,
                tmp_channels,
                kernel_size=1,
                activation=activation,
                trainable=next(trainable_iter),
                conv_clamp=conv_clamp,
                channels_last=self.channels_last,
            )
        self.conv0 = Conv2dLayer(
            tmp_channels,
            tmp_channels,
            kernel_size=3,
            activation=activation,
            trainable=next(trainable_iter),
            conv_clamp=conv_clamp,
            channels_last=self.channels_last,
        )
        self.conv1 = Conv2dLayer(
            tmp_channels,
            out_channels,
            kernel_size=3,
            activation=activation,
            down=2,
            trainable=next(trainable_iter),
            resample_filter=resample_filter,
            conv_clamp=conv_clamp,
            channels_last=self.channels_last,
        )
        if architecture == "resnet":
            self.skip = Conv2dLayer(
                tmp_channels,
                out_channels,
                kernel_size=1,
                bias=False,
                down=2,
                trainable=next(trainable_iter),
                resample_filter=resample_filter,
                channels_last=self.channels_last,
            )
    def forward(self, x, img, force_fp32=False):
        """Run one stage; returns (x, img, feat) where feat is the pre-
        downsample activation kept for decoder skip connections."""
        # dtype = torch.float16 if self.use_fp16 and not force_fp32 else torch.float32
        dtype = torch.float32
        memory_format = (
            torch.channels_last
            if self.channels_last and not force_fp32
            else torch.contiguous_format
        )
        # Input.
        if x is not None:
            x = x.to(dtype=dtype, memory_format=memory_format)
        # FromRGB.
        if self.in_channels == 0:
            img = img.to(dtype=dtype, memory_format=memory_format)
            y = self.fromrgb(img)
            x = x + y if x is not None else y
            img = (
                downsample2d(img, self.resample_filter)
                if self.architecture == "skip"
                else None
            )
        # Main layers.
        if self.architecture == "resnet":
            # gain sqrt(0.5) keeps variance constant when the two paths are summed.
            y = self.skip(x, gain=np.sqrt(0.5))
            x = self.conv0(x)
            feat = x.clone()
            x = self.conv1(x, gain=np.sqrt(0.5))
            x = y.add_(x)
        else:
            x = self.conv0(x)
            feat = x.clone()
            x = self.conv1(x)
        assert x.dtype == dtype
        return x, img, feat
class EncoderNetwork(torch.nn.Module):
    """Full FcF encoder: a pyramid of EncoderBlocks from img_resolution down
    to 4x4, followed by EncoderEpilogue. forward() returns the epilogue
    output, a zero co-modulation noise vector, and per-resolution features."""
    def __init__(
        self,
        c_dim, # Conditioning label (C) dimensionality.
        z_dim, # Input latent (Z) dimensionality.
        img_resolution, # Input resolution.
        img_channels, # Number of input color channels.
        architecture="orig", # Architecture: 'orig', 'skip', 'resnet'.
        channel_base=16384, # Overall multiplier for the number of channels.
        channel_max=512, # Maximum number of channels in any layer.
        num_fp16_res=0, # Use FP16 for the N highest resolutions.
        conv_clamp=None, # Clamp the output of convolution layers to +-X, None = disable clamping.
        cmap_dim=None, # Dimensionality of mapped conditioning label, None = default.
        block_kwargs={}, # Arguments for DiscriminatorBlock.
        mapping_kwargs={}, # Arguments for MappingNetwork.
        epilogue_kwargs={}, # Arguments for EncoderEpilogue.
    ):
        super().__init__()
        self.c_dim = c_dim
        self.z_dim = z_dim
        self.img_resolution = img_resolution
        self.img_resolution_log2 = int(np.log2(img_resolution))
        self.img_channels = img_channels
        # Resolutions from img_resolution down to 8 (4x4 handled by epilogue).
        self.block_resolutions = [
            2**i for i in range(self.img_resolution_log2, 2, -1)
        ]
        channels_dict = {
            res: min(channel_base // res, channel_max)
            for res in self.block_resolutions + [4]
        }
        fp16_resolution = max(2 ** (self.img_resolution_log2 + 1 - num_fp16_res), 8)
        if cmap_dim is None:
            cmap_dim = channels_dict[4]
        if c_dim == 0:
            cmap_dim = 0
        common_kwargs = dict(
            img_channels=img_channels, architecture=architecture, conv_clamp=conv_clamp
        )
        cur_layer_idx = 0
        for res in self.block_resolutions:
            in_channels = channels_dict[res] if res < img_resolution else 0
            tmp_channels = channels_dict[res]
            out_channels = channels_dict[res // 2]
            use_fp16 = res >= fp16_resolution
            # FP16 deliberately disabled (overrides the resolution-based choice above).
            use_fp16 = False
            block = EncoderBlock(
                in_channels,
                tmp_channels,
                out_channels,
                resolution=res,
                first_layer_idx=cur_layer_idx,
                use_fp16=use_fp16,
                **block_kwargs,
                **common_kwargs,
            )
            setattr(self, f"b{res}", block)
            cur_layer_idx += block.num_layers
        if c_dim > 0:
            # NOTE(review): MappingNetwork is not among this module's visible
            # imports; this conditional path (c_dim > 0) would raise NameError
            # unless it is imported elsewhere -- confirm.
            self.mapping = MappingNetwork(
                z_dim=0,
                c_dim=c_dim,
                w_dim=cmap_dim,
                num_ws=None,
                w_avg_beta=None,
                **mapping_kwargs,
            )
        self.b4 = EncoderEpilogue(
            channels_dict[4],
            cmap_dim=cmap_dim,
            z_dim=z_dim * 2,
            resolution=4,
            **epilogue_kwargs,
            **common_kwargs,
        )
    def forward(self, img, c, **block_kwargs):
        """Encode ``img`` (optionally conditioned on label ``c``); returns
        (x, z, feats) where feats maps resolution -> intermediate feature."""
        x = None
        feats = {}
        for res in self.block_resolutions:
            block = getattr(self, f"b{res}")
            x, img, feat = block(x, img, **block_kwargs)
            feats[res] = feat
        cmap = None
        if self.c_dim > 0:
            cmap = self.mapping(None, c)
        x, const_e = self.b4(x, cmap)
        feats[4] = const_e
        B, _ = x.shape
        z = torch.zeros(
            (B, self.z_dim), requires_grad=False, dtype=x.dtype, device=x.device
        ) ## Noise for Co-Modulation
        return x, z, feats
def fma(a, b, c): # => a * b + c
    """Fused multiply-add via a custom autograd Function whose backward pass
    un-broadcasts gradients back to each operand's original shape."""
    return _FusedMultiplyAdd.apply(a, b, c)
class _FusedMultiplyAdd(torch.autograd.Function):  # a * b + c
    """Autograd wrapper around ``torch.addcmul`` whose backward pass collapses
    broadcast gradients back to each operand's shape via ``_unbroadcast``."""

    @staticmethod
    def forward(ctx, a, b, c):  # pylint: disable=arguments-differ
        ctx.save_for_backward(a, b)
        ctx.c_shape = c.shape
        return torch.addcmul(c, a, b)

    @staticmethod
    def backward(ctx, dout):  # pylint: disable=arguments-differ
        a, b = ctx.saved_tensors
        need_a, need_b, need_c = ctx.needs_input_grad
        da = _unbroadcast(dout * b, a.shape) if need_a else None
        db = _unbroadcast(dout * a, b.shape) if need_b else None
        dc = _unbroadcast(dout, ctx.c_shape) if need_c else None
        return da, db, dc
def _unbroadcast(x, shape):
    """Reduce a broadcast tensor ``x`` back down to ``shape`` by summing over
    every axis that broadcasting expanded (leading extra dims and size-1 dims)."""
    extra_dims = x.ndim - len(shape)
    assert extra_dims >= 0
    reduce_axes = []
    for axis in range(x.ndim):
        if x.shape[axis] <= 1:
            continue
        if axis < extra_dims or shape[axis - extra_dims] == 1:
            reduce_axes.append(axis)
    if reduce_axes:
        x = x.sum(dim=reduce_axes, keepdim=True)
    if extra_dims:
        # Drop the leading broadcast dimensions (now all size 1).
        x = x.reshape(-1, *x.shape[extra_dims + 1 :])
    assert x.shape == shape
    return x
def modulated_conv2d(
    x,  # Input tensor of shape [batch_size, in_channels, in_height, in_width].
    weight,  # Weight tensor of shape [out_channels, in_channels, kernel_height, kernel_width].
    styles,  # Modulation coefficients of shape [batch_size, in_channels].
    noise=None,  # Optional noise tensor to add to the output activations.
    up=1,  # Integer upsampling factor.
    down=1,  # Integer downsampling factor.
    padding=0,  # Padding with respect to the upsampled image.
    resample_filter=None,
    # Low-pass filter to apply when resampling activations. Must be prepared beforehand by calling upfirdn2d.setup_filter().
    demodulate=True,  # Apply weight demodulation?
    flip_weight=True,  # False = convolution, True = correlation (matches torch.nn.functional.conv2d).
    fused_modconv=True,  # Perform modulation, convolution, and demodulation as a single fused operation?
):
    """StyleGAN2-style modulated convolution.

    Scales the convolution weight per sample by `styles`, optionally
    demodulates (re-normalizes) per output channel, and convolves — either
    as one fused grouped convolution, or by scaling the activations before
    and after a shared convolution.
    """
    batch_size = x.shape[0]
    out_channels, in_channels, kh, kw = weight.shape

    # Pre-normalize inputs to avoid FP16 overflow.
    if x.dtype == torch.float16 and demodulate:
        weight = weight * (
            1
            / np.sqrt(in_channels * kh * kw)
            / weight.norm(float("inf"), dim=[1, 2, 3], keepdim=True)
        )  # max_Ikk
        styles = styles / styles.norm(float("inf"), dim=1, keepdim=True)  # max_I

    # Calculate per-sample weights and demodulation coefficients.
    w = None
    dcoefs = None
    if demodulate or fused_modconv:
        w = weight.unsqueeze(0)  # [NOIkk]
        w = w * styles.reshape(batch_size, 1, -1, 1, 1)  # [NOIkk]
    if demodulate:
        dcoefs = (w.square().sum(dim=[2, 3, 4]) + 1e-8).rsqrt()  # [NO]
    if demodulate and fused_modconv:
        w = w * dcoefs.reshape(batch_size, -1, 1, 1, 1)  # [NOIkk]

    # Execute by scaling the activations before and after the convolution.
    if not fused_modconv:
        x = x * styles.to(x.dtype).reshape(batch_size, -1, 1, 1)
        # BUGFIX: `conv2d_resample` is a function in this module (see the
        # fused path below); the previous `conv2d_resample.conv2d_resample`
        # attribute access would raise AttributeError on this path.
        x = conv2d_resample(
            x=x,
            w=weight.to(x.dtype),
            f=resample_filter,
            up=up,
            down=down,
            padding=padding,
            flip_weight=flip_weight,
        )
        if demodulate and noise is not None:
            x = fma(
                x, dcoefs.to(x.dtype).reshape(batch_size, -1, 1, 1), noise.to(x.dtype)
            )
        elif demodulate:
            x = x * dcoefs.to(x.dtype).reshape(batch_size, -1, 1, 1)
        elif noise is not None:
            x = x.add_(noise.to(x.dtype))
        return x

    # Execute as one fused op using grouped convolution: fold the batch into
    # the channel axis and give each sample its own weight group.
    batch_size = int(batch_size)
    x = x.reshape(1, -1, *x.shape[2:])
    w = w.reshape(-1, in_channels, kh, kw)
    x = conv2d_resample(
        x=x,
        w=w.to(x.dtype),
        f=resample_filter,
        up=up,
        down=down,
        padding=padding,
        groups=batch_size,
        flip_weight=flip_weight,
    )
    x = x.reshape(batch_size, -1, *x.shape[2:])
    if noise is not None:
        x = x.add_(noise)
    return x
class SynthesisLayer(torch.nn.Module):
    """One StyleGAN2 synthesis layer: style-modulated convolution, optional
    per-pixel noise, then a leaky-ReLU activation with gain/clamp."""

    def __init__(
        self,
        in_channels,  # Number of input channels.
        out_channels,  # Number of output channels.
        w_dim,  # Intermediate latent (W) dimensionality.
        resolution,  # Resolution of this layer.
        kernel_size=3,  # Convolution kernel size.
        up=1,  # Integer upsampling factor.
        use_noise=True,  # Enable noise input?
        activation="lrelu",  # Activation function: 'relu', 'lrelu', etc.
        resample_filter=[
            1,
            3,
            3,
            1,
        ],  # Low-pass filter to apply when resampling activations.
        conv_clamp=None,  # Clamp the output of convolution layers to +-X, None = disable clamping.
        channels_last=False,  # Use channels_last format for the weights?
    ):
        super().__init__()
        self.resolution = resolution
        self.up = up
        self.use_noise = use_noise
        self.activation = activation
        self.conv_clamp = conv_clamp
        self.register_buffer("resample_filter", setup_filter(resample_filter))
        self.padding = kernel_size // 2
        self.act_gain = activation_funcs[activation].def_gain
        # Affine map from the latent to per-input-channel modulation styles.
        self.affine = FullyConnectedLayer(w_dim, in_channels, bias_init=1)
        fmt = torch.channels_last if channels_last else torch.contiguous_format
        self.weight = torch.nn.Parameter(
            torch.randn([out_channels, in_channels, kernel_size, kernel_size]).to(
                memory_format=fmt
            )
        )
        if use_noise:
            self.register_buffer("noise_const", torch.randn([resolution, resolution]))
            self.noise_strength = torch.nn.Parameter(torch.zeros([]))
        self.bias = torch.nn.Parameter(torch.zeros([out_channels]))

    def forward(self, x, w, noise_mode="none", fused_modconv=True, gain=1):
        assert noise_mode in ["random", "const", "none"]
        in_resolution = self.resolution // self.up  # kept for parity (unused)
        styles = self.affine(w)

        # Select the noise source, if any.
        noise = None
        if self.use_noise:
            if noise_mode == "random":
                noise = (
                    torch.randn(
                        [x.shape[0], 1, self.resolution, self.resolution],
                        device=x.device,
                    )
                    * self.noise_strength
                )
            elif noise_mode == "const":
                noise = self.noise_const * self.noise_strength

        flip_weight = self.up == 1  # slightly faster
        x = modulated_conv2d(
            x=x,
            weight=self.weight,
            styles=styles,
            noise=noise,
            up=self.up,
            padding=self.padding,
            resample_filter=self.resample_filter,
            flip_weight=flip_weight,
            fused_modconv=fused_modconv,
        )

        # NOTE(review): the activation is hard-wired to leaky-ReLU even though
        # `self.activation` is configurable — preserved as-is.
        x = F.leaky_relu(x, negative_slope=0.2, inplace=False)
        act_gain = self.act_gain * gain
        if act_gain != 1:
            x = x * act_gain
        act_clamp = self.conv_clamp * gain if self.conv_clamp is not None else None
        if act_clamp is not None:
            x = x.clamp(-act_clamp, act_clamp)
        return x
class ToRGBLayer(torch.nn.Module):
    """Projects features to image channels with a style-modulated conv
    (no demodulation), then adds bias with optional clamping."""

    def __init__(
        self,
        in_channels,
        out_channels,
        w_dim,
        kernel_size=1,
        conv_clamp=None,
        channels_last=False,
    ):
        super().__init__()
        self.conv_clamp = conv_clamp
        self.affine = FullyConnectedLayer(w_dim, in_channels, bias_init=1)
        fmt = torch.channels_last if channels_last else torch.contiguous_format
        self.weight = torch.nn.Parameter(
            torch.randn([out_channels, in_channels, kernel_size, kernel_size]).to(
                memory_format=fmt
            )
        )
        self.bias = torch.nn.Parameter(torch.zeros([out_channels]))
        # Equalized learning-rate scaling applied to the style vector.
        self.weight_gain = 1 / np.sqrt(in_channels * (kernel_size**2))

    def forward(self, x, w, fused_modconv=True):
        styles = self.affine(w) * self.weight_gain
        out = modulated_conv2d(
            x=x,
            weight=self.weight,
            styles=styles,
            demodulate=False,
            fused_modconv=fused_modconv,
        )
        return bias_act(out, self.bias.to(out.dtype), clamp=self.conv_clamp)
class SynthesisForeword(torch.nn.Module):
    """First (4x4) synthesis stage: projects the global latent to a 4x4
    feature map, adds the encoder's 4x4 skip feature, and optionally emits
    an initial RGB image ('skip' architecture)."""

    def __init__(
        self,
        z_dim,  # Output Latent (Z) dimensionality.
        resolution,  # Resolution of this block.
        in_channels,
        img_channels,  # Number of input color channels.
        architecture="skip",  # Architecture: 'orig', 'skip', 'resnet'.
        activation="lrelu",  # Activation function: 'relu', 'lrelu', etc.
    ):
        super().__init__()
        self.in_channels = in_channels
        self.z_dim = z_dim
        self.resolution = resolution
        self.img_channels = img_channels
        self.architecture = architecture

        self.fc = FullyConnectedLayer(
            self.z_dim, (self.z_dim // 2) * 4 * 4, activation=activation
        )
        self.conv = SynthesisLayer(
            self.in_channels, self.in_channels, w_dim=(z_dim // 2) * 3, resolution=4
        )
        if architecture == "skip":
            self.torgb = ToRGBLayer(
                self.in_channels,
                self.img_channels,
                kernel_size=1,
                w_dim=(z_dim // 2) * 3,
            )

    def forward(self, x, ws, feats, img, force_fp32=False):
        _ = force_fp32  # unused
        dtype = torch.float32
        memory_format = torch.contiguous_format

        x_global = x.clone()
        # Project the latent to a (z_dim // 2, 4, 4) feature map.
        x = self.fc(x)
        x = x.view(-1, self.z_dim // 2, 4, 4)
        x = x.to(dtype=dtype, memory_format=memory_format)

        # Add the encoder's 4x4 skip feature, then run the first conv,
        # modulated by [w, global latent].
        x = x + feats[4].clone()
        mod_vector = torch.cat([ws[:, 0], x_global.clone()], dim=1)
        x = self.conv(x, mod_vector)

        mod_vector = torch.cat([ws[:, 2 * 2 - 3], x_global.clone()], dim=1)
        if self.architecture == "skip":
            img = self.torgb(x, mod_vector)
            img = img.to(dtype=torch.float32, memory_format=torch.contiguous_format)
        assert x.dtype == dtype
        return x, img
class SELayer(nn.Module):
    """Squeeze-and-Excitation: rescales each channel by a learned,
    input-dependent gate computed from globally pooled features."""

    def __init__(self, channel, reduction=16):
        super(SELayer, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        # Bottleneck MLP (channel -> channel/reduction -> channel) ending in
        # a sigmoid so each gate lies in (0, 1).
        self.fc = nn.Sequential(
            nn.Linear(channel, channel // reduction, bias=False),
            nn.ReLU(inplace=False),
            nn.Linear(channel // reduction, channel, bias=False),
            nn.Sigmoid(),
        )

    def forward(self, x):
        batch, channels, _, _ = x.size()
        # Squeeze: global average pool -> (B, C). Excite: per-channel gate.
        gate = self.avg_pool(x).view(batch, channels)
        gate = self.fc(gate).view(batch, channels, 1, 1)
        return x * gate.expand_as(x)
class FourierUnit(nn.Module):
def __init__(
self,
in_channels,
out_channels,
groups=1,
spatial_scale_factor=None,
spatial_scale_mode="bilinear",
spectral_pos_encoding=False,
use_se=False,
se_kwargs=None,
ffc3d=False,
fft_norm="ortho",
):
# bn_layer not used
super(FourierUnit, self).__init__()
self.groups = groups
self.conv_layer = torch.nn.Conv2d(
in_channels=in_channels * 2 + (2 if spectral_pos_encoding else 0),
out_channels=out_channels * 2,
kernel_size=1,
stride=1,
padding=0,
groups=self.groups,
bias=False,
)
self.relu = torch.nn.ReLU(inplace=False)
# squeeze and excitation block
self.use_se = use_se
if use_se:
if se_kwargs is None:
se_kwargs = {}
self.se = SELayer(self.conv_layer.in_channels, **se_kwargs)
self.spatial_scale_factor = spatial_scale_factor
self.spatial_scale_mode = spatial_scale_mode
self.spectral_pos_encoding = spectral_pos_encoding
self.ffc3d = ffc3d
self.fft_norm = fft_norm
def forward(self, x):
batch = x.shape[0]
if self.spatial_scale_factor is not None:
orig_size = x.shape[-2:]
x = F.interpolate(
x,
scale_factor=self.spatial_scale_factor,
mode=self.spatial_scale_mode,
align_corners=False,
)
r_size = x.size()
# (batch, c, h, w/2+1, 2)
fft_dim = (-3, -2, -1) if self.ffc3d else (-2, -1)
ffted = fft.rfftn(x, dim=fft_dim, norm=self.fft_norm)
ffted = torch.stack((ffted.real, ffted.imag), dim=-1)
ffted = ffted.permute(0, 1, 4, 2, 3).contiguous() # (batch, c, 2, h, w/2+1)
ffted = ffted.view(
(
batch,
-1,
)
+ ffted.size()[3:]
)
if self.spectral_pos_encoding:
height, width = ffted.shape[-2:]
coords_vert = (
torch.linspace(0, 1, height)[None, None, :, None]
.expand(batch, 1, height, width)
.to(ffted)
)
coords_hor = (
torch.linspace(0, 1, width)[None, None, None, :]
.expand(batch, 1, height, width)
.to(ffted)
)
ffted = torch.cat((coords_vert, coords_hor, ffted), dim=1)
if self.use_se:
ffted = self.se(ffted)
ffted = self.conv_layer(ffted) # (batch, c*2, h, w/2+1)
ffted = self.relu(ffted)
ffted = (
ffted.view(
(
batch,
-1,
2,
)
+ ffted.size()[2:]
)
.permute(0, 1, 3, 4, 2)
.contiguous()
) # (batch,c, t, h, w/2+1, 2)
ffted = torch.complex(ffted[..., 0], ffted[..., 1])
ifft_shape_slice = x.shape[-3:] if self.ffc3d else x.shape[-2:]
output = torch.fft.irfftn(
ffted, s=ifft_shape_slice, dim=fft_dim, norm=self.fft_norm
)
if self.spatial_scale_factor is not None:
output = F.interpolate(
output,
size=orig_size,
mode=self.spatial_scale_mode,
align_corners=False,
)
return output
class SpectralTransform(nn.Module):
    """1x1 conv -> Fourier mixing (global FourierUnit plus an optional local
    FourierUnit over a 2x2 tiling of the plane) -> 1x1 conv with residual."""

    def __init__(
        self,
        in_channels,
        out_channels,
        stride=1,
        groups=1,
        enable_lfu=True,
        **fu_kwargs,
    ):
        # bn_layer not used
        super(SpectralTransform, self).__init__()
        self.enable_lfu = enable_lfu
        # Halve the spatial resolution first when stride == 2.
        if stride == 2:
            self.downsample = nn.AvgPool2d(kernel_size=(2, 2), stride=2)
        else:
            self.downsample = nn.Identity()
        self.stride = stride
        self.conv1 = nn.Sequential(
            nn.Conv2d(
                in_channels, out_channels // 2, kernel_size=1, groups=groups, bias=False
            ),
            # nn.BatchNorm2d(out_channels // 2),
            nn.ReLU(inplace=True),
        )
        self.fu = FourierUnit(out_channels // 2, out_channels // 2, groups, **fu_kwargs)
        if self.enable_lfu:
            self.lfu = FourierUnit(out_channels // 2, out_channels // 2, groups)
        self.conv2 = torch.nn.Conv2d(
            out_channels // 2, out_channels, kernel_size=1, groups=groups, bias=False
        )

    def forward(self, x):
        x = self.downsample(x)
        x = self.conv1(x)
        output = self.fu(x)

        if self.enable_lfu:
            # Local Fourier unit: take the first quarter of the channels,
            # stack the four spatial quadrants along channels, transform
            # once, then tile the result back over the whole plane.
            _, c, h, _ = x.shape
            split_no = 2
            split_s = h // split_no
            patches = torch.cat(
                torch.split(x[:, : c // 4], split_s, dim=-2), dim=1
            ).contiguous()
            patches = torch.cat(
                torch.split(patches, split_s, dim=-1), dim=1
            ).contiguous()
            xs = self.lfu(patches)
            xs = xs.repeat(1, 1, split_no, split_no).contiguous()
        else:
            xs = 0

        return self.conv2(x + output + xs)
class FFC(nn.Module):
    """Fast Fourier Convolution: channels are split into a local branch
    (regular convolutions) and a global branch (SpectralTransform), connected
    by four paths l2l / l2g / g2l / g2g.

    ``ratio_gin`` / ``ratio_gout`` give the fraction of input / output
    channels routed through the global branch. Any path whose input or
    output side has zero channels collapses to ``nn.Identity`` so
    ``forward()`` can call it unconditionally.
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size,
        ratio_gin,
        ratio_gout,
        stride=1,
        padding=0,
        dilation=1,
        groups=1,
        bias=False,
        enable_lfu=True,
        padding_type="reflect",
        gated=False,
        **spectral_kwargs,
    ):
        super(FFC, self).__init__()

        assert stride == 1 or stride == 2, "Stride should be 1 or 2."
        self.stride = stride

        # Channel counts for the global (cg) and local (cl) sides.
        in_cg = int(in_channels * ratio_gin)
        in_cl = in_channels - in_cg
        out_cg = int(out_channels * ratio_gout)
        out_cl = out_channels - out_cg
        # groups_g = 1 if groups == 1 else int(groups * ratio_gout)
        # groups_l = 1 if groups == 1 else groups - groups_g

        self.ratio_gin = ratio_gin
        self.ratio_gout = ratio_gout
        self.global_in_num = in_cg

        # local -> local
        module = nn.Identity if in_cl == 0 or out_cl == 0 else nn.Conv2d
        self.convl2l = module(
            in_cl,
            out_cl,
            kernel_size,
            stride,
            padding,
            dilation,
            groups,
            bias,
            padding_mode=padding_type,
        )
        # local -> global
        module = nn.Identity if in_cl == 0 or out_cg == 0 else nn.Conv2d
        self.convl2g = module(
            in_cl,
            out_cg,
            kernel_size,
            stride,
            padding,
            dilation,
            groups,
            bias,
            padding_mode=padding_type,
        )
        # global -> local
        module = nn.Identity if in_cg == 0 or out_cl == 0 else nn.Conv2d
        self.convg2l = module(
            in_cg,
            out_cl,
            kernel_size,
            stride,
            padding,
            dilation,
            groups,
            bias,
            padding_mode=padding_type,
        )
        # global -> global, computed in the Fourier domain.
        module = nn.Identity if in_cg == 0 or out_cg == 0 else SpectralTransform
        self.convg2g = module(
            in_cg,
            out_cg,
            stride,
            1 if groups == 1 else groups // 2,
            enable_lfu,
            **spectral_kwargs,
        )

        self.gated = gated
        # Optional sigmoid gating of the two cross paths (g2l and l2g).
        module = (
            nn.Identity if in_cg == 0 or out_cl == 0 or not self.gated else nn.Conv2d
        )
        self.gate = module(in_channels, 2, 1)

    def forward(self, x, fname=None):
        # `x` may be a (local, global) tuple; a bare tensor means "no global
        # part" and the global component defaults to the integer 0.
        x_l, x_g = x if type(x) is tuple else (x, 0)
        out_xl, out_xg = 0, 0

        if self.gated:
            total_input_parts = [x_l]
            if torch.is_tensor(x_g):
                total_input_parts.append(x_g)
            total_input = torch.cat(total_input_parts, dim=1)

            gates = torch.sigmoid(self.gate(total_input))
            g2l_gate, l2g_gate = gates.chunk(2, dim=1)
        else:
            g2l_gate, l2g_gate = 1, 1

        spec_x = self.convg2g(x_g)

        if self.ratio_gout != 1:
            out_xl = self.convl2l(x_l) + self.convg2l(x_g) * g2l_gate
        if self.ratio_gout != 0:
            out_xg = self.convl2g(x_l) * l2g_gate + spec_x

        return out_xl, out_xg
class FFC_BN_ACT(nn.Module):
    """FFC followed by a per-branch activation. The batch-norm variant is
    disabled in this port (the norm layers are kept commented out)."""

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size,
        ratio_gin,
        ratio_gout,
        stride=1,
        padding=0,
        dilation=1,
        groups=1,
        bias=False,
        norm_layer=nn.SyncBatchNorm,
        activation_layer=nn.Identity,
        padding_type="reflect",
        enable_lfu=True,
        **kwargs,
    ):
        super(FFC_BN_ACT, self).__init__()
        self.ffc = FFC(
            in_channels,
            out_channels,
            kernel_size,
            ratio_gin,
            ratio_gout,
            stride,
            padding,
            dilation,
            groups,
            bias,
            enable_lfu,
            padding_type=padding_type,
            **kwargs,
        )
        # An all-global output needs no local norm/act; an all-local output
        # needs no global norm/act. Norms are disabled, selection kept for
        # reference alongside the commented-out layers.
        lnorm = nn.Identity if ratio_gout == 1 else norm_layer
        gnorm = nn.Identity if ratio_gout == 0 else norm_layer
        global_channels = int(out_channels * ratio_gout)
        # self.bn_l = lnorm(out_channels - global_channels)
        # self.bn_g = gnorm(global_channels)

        lact = nn.Identity if ratio_gout == 1 else activation_layer
        gact = nn.Identity if ratio_gout == 0 else activation_layer
        self.act_l = lact(inplace=True)
        self.act_g = gact(inplace=True)

    def forward(self, x, fname=None):
        local_out, global_out = self.ffc(
            x,
            fname=fname,
        )
        return self.act_l(local_out), self.act_g(global_out)
class FFCResnetBlock(nn.Module):
    """Residual block of two FFC_BN_ACT convolutions over a (local, global)
    feature pair; ``inline`` packs the pair into one channel-concatenated
    tensor at the block boundary."""

    def __init__(
        self,
        dim,
        padding_type,
        norm_layer,
        activation_layer=nn.ReLU,
        dilation=1,
        spatial_transform_kwargs=None,  # unused, kept for interface parity
        inline=False,
        ratio_gin=0.75,
        ratio_gout=0.75,
    ):
        super().__init__()
        common = dict(
            kernel_size=3,
            padding=dilation,
            dilation=dilation,
            norm_layer=norm_layer,
            activation_layer=activation_layer,
            padding_type=padding_type,
            ratio_gin=ratio_gin,
            ratio_gout=ratio_gout,
        )
        self.conv1 = FFC_BN_ACT(dim, dim, **common)
        self.conv2 = FFC_BN_ACT(dim, dim, **common)
        self.inline = inline

    def forward(self, x, fname=None):
        # Unpack the (local, global) pair from either packed or tuple form.
        if self.inline:
            split = self.conv1.ffc.global_in_num
            x_l, x_g = x[:, :-split], x[:, -split:]
        else:
            x_l, x_g = x if type(x) is tuple else (x, 0)
        id_l, id_g = x_l, x_g

        x_l, x_g = self.conv1((x_l, x_g), fname=fname)
        x_l, x_g = self.conv2((x_l, x_g), fname=fname)

        # Residual connection on both branches.
        x_l, x_g = id_l + x_l, id_g + x_g
        if self.inline:
            return torch.cat((x_l, x_g), dim=1)
        return x_l, x_g
class ConcatTupleLayer(nn.Module):
    """Merges a (local, global) feature tuple into a single tensor.

    If the global part is not a tensor (e.g. the integer 0 placeholder),
    the local tensor is returned unchanged; otherwise the pair is
    concatenated along the channel dimension.
    """

    def forward(self, x):
        assert isinstance(x, tuple)
        local_part, global_part = x
        assert torch.is_tensor(local_part) or torch.is_tensor(global_part)
        if not torch.is_tensor(global_part):
            return local_part
        return torch.cat(x, dim=1)
class FFCBlock(torch.nn.Module):
    """Residual FFC stage used inside SynthesisBlock skip connections: runs
    an FFCResnetBlock on the (local, global) split of the features, merges
    the pair back, and adds the input as a residual."""

    def __init__(
        self,
        dim,  # Number of output/input channels.
        kernel_size,  # Width and height of the convolution kernel.
        padding,
        ratio_gin=0.75,
        ratio_gout=0.75,
        activation="linear",  # Activation function: 'relu', 'lrelu', etc.
    ):
        super().__init__()
        # Anything other than the literal "linear" selects ReLU.
        self.activation = nn.Identity if activation == "linear" else nn.ReLU
        self.padding = padding
        self.kernel_size = kernel_size
        self.ffc_block = FFCResnetBlock(
            dim=dim,
            padding_type="reflect",
            norm_layer=nn.SyncBatchNorm,
            activation_layer=self.activation,
            dilation=1,
            ratio_gin=ratio_gin,
            ratio_gout=ratio_gout,
        )
        self.concat_layer = ConcatTupleLayer()

    def forward(self, gen_ft, mask, fname=None):
        # `mask` is accepted for interface parity but not used here.
        feats = gen_ft.float()
        split = self.ffc_block.conv1.ffc.global_in_num
        x_l, x_g = feats[:, :-split], feats[:, -split:]
        id_l, id_g = x_l, x_g
        x_l, x_g = self.ffc_block((x_l, x_g), fname=fname)
        # Residual around the FFC block...
        x_l, x_g = id_l + x_l, id_g + x_g
        merged = self.concat_layer((x_l, x_g))
        # ...and a second residual around the whole stage.
        return merged + gen_ft.float()
class FFCSkipLayer(torch.nn.Module):
    """Thin wrapper applying one FFCBlock with 'same' padding."""

    def __init__(
        self,
        dim,  # Number of input/output channels.
        kernel_size=3,  # Convolution kernel size.
        ratio_gin=0.75,
        ratio_gout=0.75,
    ):
        super().__init__()
        self.padding = kernel_size // 2
        # NOTE(review): `activation` receives the class nn.ReLU rather than a
        # string, so FFCBlock's `== "linear"` test selects ReLU — preserved.
        self.ffc_act = FFCBlock(
            dim=dim,
            kernel_size=kernel_size,
            activation=nn.ReLU,
            padding=self.padding,
            ratio_gin=ratio_gin,
            ratio_gout=ratio_gout,
        )

    def forward(self, gen_ft, mask, fname=None):
        return self.ffc_act(gen_ft, mask, fname=fname)
class SynthesisBlock(torch.nn.Module):
    """One resolution level of the FcF synthesis network.

    Fuses (optionally upsampled) features with the encoder skip feature of
    the same resolution — refined through FFC skip layers where configured —
    and renders the running RGB image. Note: `ws` in forward() is a 3-tuple
    of modulation vectors (conv0, conv1, torgb), not a broadcast W tensor.
    """

    def __init__(
        self,
        in_channels,  # Number of input channels, 0 = first block.
        out_channels,  # Number of output channels.
        w_dim,  # Intermediate latent (W) dimensionality.
        resolution,  # Resolution of this block.
        img_channels,  # Number of output color channels.
        is_last,  # Is this the last block?
        architecture="skip",  # Architecture: 'orig', 'skip', 'resnet'.
        resample_filter=[
            1,
            3,
            3,
            1,
        ],  # Low-pass filter to apply when resampling activations.
        conv_clamp=None,  # Clamp the output of convolution layers to +-X, None = disable clamping.
        use_fp16=False,  # Use FP16 for this block?
        fp16_channels_last=False,  # Use channels-last memory format with FP16?
        **layer_kwargs,  # Arguments for SynthesisLayer.
    ):
        assert architecture in ["orig", "skip", "resnet"]
        super().__init__()
        self.in_channels = in_channels
        self.w_dim = w_dim
        self.resolution = resolution
        self.img_channels = img_channels
        self.is_last = is_last
        self.architecture = architecture
        self.use_fp16 = use_fp16
        self.channels_last = use_fp16 and fp16_channels_last
        self.register_buffer("resample_filter", setup_filter(resample_filter))
        self.num_conv = 0
        self.num_torgb = 0
        # Number of FFC skip-refinement layers per resolution.
        self.res_ffc = {4: 0, 8: 0, 16: 0, 32: 1, 64: 1, 128: 1, 256: 1, 512: 1}

        if in_channels != 0 and resolution >= 8:
            self.ffc_skip = nn.ModuleList()
            for _ in range(self.res_ffc[resolution]):
                self.ffc_skip.append(FFCSkipLayer(dim=out_channels))

        if in_channels == 0:
            # First block has a learned constant input instead of conv0.
            self.const = torch.nn.Parameter(
                torch.randn([out_channels, resolution, resolution])
            )

        if in_channels != 0:
            self.conv0 = SynthesisLayer(
                in_channels,
                out_channels,
                w_dim=w_dim * 3,
                resolution=resolution,
                up=2,
                resample_filter=resample_filter,
                conv_clamp=conv_clamp,
                channels_last=self.channels_last,
                **layer_kwargs,
            )
            self.num_conv += 1

        self.conv1 = SynthesisLayer(
            out_channels,
            out_channels,
            w_dim=w_dim * 3,
            resolution=resolution,
            conv_clamp=conv_clamp,
            channels_last=self.channels_last,
            **layer_kwargs,
        )
        self.num_conv += 1

        if is_last or architecture == "skip":
            self.torgb = ToRGBLayer(
                out_channels,
                img_channels,
                w_dim=w_dim * 3,
                conv_clamp=conv_clamp,
                channels_last=self.channels_last,
            )
            self.num_torgb += 1

        if in_channels != 0 and architecture == "resnet":
            self.skip = Conv2dLayer(
                in_channels,
                out_channels,
                kernel_size=1,
                bias=False,
                up=2,
                resample_filter=resample_filter,
                channels_last=self.channels_last,
            )

    def _fuse_skip(self, x, x_skip, mask):
        """Add the encoder skip feature to x; when FFC skip layers exist,
        refine the sum with masked FFC residual blocks first.

        Deduplicates logic that was copy-pasted in the 'resnet' and default
        branches of forward().
        """
        if len(self.ffc_skip) > 0:
            mask = F.interpolate(
                mask,
                size=x_skip.shape[2:],
            )
            z = x + x_skip
            for fres in self.ffc_skip:
                z = fres(z, mask)
            return x + z
        return x + x_skip

    def forward(
        self,
        x,
        mask,
        feats,
        img,
        ws,
        fname=None,
        force_fp32=False,
        fused_modconv=None,
        **layer_kwargs,
    ):
        # FP16 is disabled in this port: always run in FP32. (The original
        # expression was `torch.float16 if self.use_fp16 and not force_fp32
        # else torch.float32`, immediately overwritten.)
        dtype = torch.float32
        memory_format = (
            torch.channels_last
            if self.channels_last and not force_fp32
            else torch.contiguous_format
        )
        if fused_modconv is None:
            # Fused modconv only at inference, and only when numerically safe
            # (FP32, or a single sample).
            fused_modconv = (not self.training) and (
                dtype == torch.float32 or int(x.shape[0]) == 1
            )

        x = x.to(dtype=dtype, memory_format=memory_format)
        x_skip = (
            feats[self.resolution].clone().to(dtype=dtype, memory_format=memory_format)
        )

        # Main layers.
        if self.in_channels == 0:
            x = self.conv1(x, ws[1], fused_modconv=fused_modconv, **layer_kwargs)
        elif self.architecture == "resnet":
            y = self.skip(x, gain=np.sqrt(0.5))
            x = self.conv0(
                x, ws[0].clone(), fused_modconv=fused_modconv, **layer_kwargs
            )
            x = self._fuse_skip(x, x_skip, mask)
            x = self.conv1(
                x,
                ws[1].clone(),
                fused_modconv=fused_modconv,
                gain=np.sqrt(0.5),
                **layer_kwargs,
            )
            x = y.add_(x)
        else:
            x = self.conv0(
                x, ws[0].clone(), fused_modconv=fused_modconv, **layer_kwargs
            )
            x = self._fuse_skip(x, x_skip, mask)
            x = self.conv1(
                x, ws[1].clone(), fused_modconv=fused_modconv, **layer_kwargs
            )

        # ToRGB.
        if img is not None:
            img = upsample2d(img, self.resample_filter)
        if self.is_last or self.architecture == "skip":
            y = self.torgb(x, ws[2].clone(), fused_modconv=fused_modconv)
            y = y.to(dtype=torch.float32, memory_format=torch.contiguous_format)
            img = img.add_(y) if img is not None else y

        x = x.to(dtype=dtype)
        assert x.dtype == dtype
        assert img is None or img.dtype == torch.float32
        return x, img
class SynthesisNetwork(torch.nn.Module):
    """Stack of SynthesisBlocks from 8x8 up to the output resolution,
    preceded by the 4x4 SynthesisForeword."""

    def __init__(
        self,
        w_dim,  # Intermediate latent (W) dimensionality.
        z_dim,  # Output Latent (Z) dimensionality.
        img_resolution,  # Output image resolution.
        img_channels,  # Number of color channels.
        channel_base=16384,  # Overall multiplier for the number of channels.
        channel_max=512,  # Maximum number of channels in any layer.
        num_fp16_res=0,  # Use FP16 for the N highest resolutions.
        **block_kwargs,  # Arguments for SynthesisBlock.
    ):
        # Resolution must be a power of two >= 4.
        assert img_resolution >= 4 and img_resolution & (img_resolution - 1) == 0
        super().__init__()
        self.w_dim = w_dim
        self.img_resolution = img_resolution
        self.img_resolution_log2 = int(np.log2(img_resolution))
        self.img_channels = img_channels
        self.block_resolutions = [
            2**i for i in range(3, self.img_resolution_log2 + 1)
        ]
        channels_dict = {
            res: min(channel_base // res, channel_max) for res in self.block_resolutions
        }
        fp16_resolution = max(2 ** (self.img_resolution_log2 + 1 - num_fp16_res), 8)

        self.foreword = SynthesisForeword(
            img_channels=img_channels,
            in_channels=min(channel_base // 4, channel_max),
            z_dim=z_dim * 2,
            resolution=4,
        )

        self.num_ws = self.img_resolution_log2 * 2 - 2
        for res in self.block_resolutions:
            if res // 2 in channels_dict.keys():
                in_channels = channels_dict[res // 2] if res > 4 else 0
            else:
                in_channels = min(channel_base // (res // 2), channel_max)
            out_channels = channels_dict[res]
            use_fp16 = res >= fp16_resolution
            use_fp16 = False  # FP16 path disabled in this port
            is_last = res == self.img_resolution
            block = SynthesisBlock(
                in_channels,
                out_channels,
                w_dim=w_dim,
                resolution=res,
                img_channels=img_channels,
                is_last=is_last,
                use_fp16=use_fp16,
                **block_kwargs,
            )
            setattr(self, f"b{res}", block)

    def forward(self, x_global, mask, feats, ws, fname=None, **block_kwargs):
        def styles_at(idx):
            # Each modulation vector is [w_idx, global latent].
            return torch.cat((ws[:, idx], x_global.clone()), dim=1)

        img = None
        x, img = self.foreword(x_global, ws, feats, img)
        for res in self.block_resolutions:
            block = getattr(self, f"b{res}")
            lg2 = int(np.log2(res))
            mods = (
                styles_at(lg2 * 2 - 5),
                styles_at(lg2 * 2 - 4),
                styles_at(lg2 * 2 - 3),
            )
            x, img = block(x, mask, feats, img, mods, fname=fname, **block_kwargs)
        return img
class MappingNetwork(torch.nn.Module):
    """StyleGAN2 mapping network: embeds (z, c), maps through an MLP to W,
    optionally tracks a moving average of W, and applies truncation."""

    def __init__(
        self,
        z_dim,  # Input latent (Z) dimensionality, 0 = no latent.
        c_dim,  # Conditioning label (C) dimensionality, 0 = no label.
        w_dim,  # Intermediate latent (W) dimensionality.
        num_ws,  # Number of intermediate latents to output, None = do not broadcast.
        num_layers=8,  # Number of mapping layers.
        embed_features=None,  # Label embedding dimensionality, None = same as w_dim.
        layer_features=None,  # Number of intermediate features in the mapping layers, None = same as w_dim.
        activation="lrelu",  # Activation function: 'relu', 'lrelu', etc.
        lr_multiplier=0.01,  # Learning rate multiplier for the mapping layers.
        w_avg_beta=0.995,  # Decay for tracking the moving average of W during training, None = do not track.
    ):
        super().__init__()
        self.z_dim = z_dim
        self.c_dim = c_dim
        self.w_dim = w_dim
        self.num_ws = num_ws
        self.num_layers = num_layers
        self.w_avg_beta = w_avg_beta

        if embed_features is None:
            embed_features = w_dim
        if c_dim == 0:
            embed_features = 0
        if layer_features is None:
            layer_features = w_dim

        # Layer widths: concat(z, label embedding) -> hidden... -> w_dim.
        features_list = (
            [z_dim + embed_features] + [layer_features] * (num_layers - 1) + [w_dim]
        )

        if c_dim > 0:
            self.embed = FullyConnectedLayer(c_dim, embed_features)
        layer_pairs = zip(features_list[:-1], features_list[1:])
        for idx, (fan_in, fan_out) in enumerate(layer_pairs):
            setattr(
                self,
                f"fc{idx}",
                FullyConnectedLayer(
                    fan_in,
                    fan_out,
                    activation=activation,
                    lr_multiplier=lr_multiplier,
                ),
            )
        if num_ws is not None and w_avg_beta is not None:
            self.register_buffer("w_avg", torch.zeros([w_dim]))

    def forward(
        self, z, c, truncation_psi=1, truncation_cutoff=None, skip_w_avg_update=False
    ):
        # Embed, normalize, and concatenate inputs.
        x = None
        with torch.autograd.profiler.record_function("input"):
            if self.z_dim > 0:
                x = normalize_2nd_moment(z.to(torch.float32))
            if self.c_dim > 0:
                y = normalize_2nd_moment(self.embed(c.to(torch.float32)))
                x = y if x is None else torch.cat([x, y], dim=1)

        # Run the MLP.
        for idx in range(self.num_layers):
            x = getattr(self, f"fc{idx}")(x)

        # Track the moving average of W (training only).
        if self.w_avg_beta is not None and self.training and not skip_w_avg_update:
            with torch.autograd.profiler.record_function("update_w_avg"):
                self.w_avg.copy_(
                    x.detach().mean(dim=0).lerp(self.w_avg, self.w_avg_beta)
                )

        # Broadcast one W per synthesis layer.
        if self.num_ws is not None:
            with torch.autograd.profiler.record_function("broadcast"):
                x = x.unsqueeze(1).repeat([1, self.num_ws, 1])

        # Pull W toward its moving average (truncation trick).
        if truncation_psi != 1:
            with torch.autograd.profiler.record_function("truncate"):
                assert self.w_avg_beta is not None
                if self.num_ws is None or truncation_cutoff is None:
                    x = self.w_avg.lerp(x, truncation_psi)
                else:
                    x[:, :truncation_cutoff] = self.w_avg.lerp(
                        x[:, :truncation_cutoff], truncation_psi
                    )
        return x
class Generator(torch.nn.Module):
    """Full FcF generator: encoder -> mapping -> synthesis."""

    def __init__(
        self,
        z_dim,  # Input latent (Z) dimensionality.
        c_dim,  # Conditioning label (C) dimensionality.
        w_dim,  # Intermediate latent (W) dimensionality.
        img_resolution,  # Output resolution.
        img_channels,  # Number of output color channels.
        encoder_kwargs={},  # Arguments for EncoderNetwork.
        mapping_kwargs={},  # Arguments for MappingNetwork.
        synthesis_kwargs={},  # Arguments for SynthesisNetwork.
    ):
        super().__init__()
        self.z_dim = z_dim
        self.c_dim = c_dim
        self.w_dim = w_dim
        self.img_resolution = img_resolution
        self.img_channels = img_channels

        shared = dict(img_resolution=img_resolution, img_channels=img_channels)
        self.encoder = EncoderNetwork(
            c_dim=c_dim, z_dim=z_dim, **shared, **encoder_kwargs
        )
        self.synthesis = SynthesisNetwork(
            z_dim=z_dim, w_dim=w_dim, **shared, **synthesis_kwargs
        )
        self.num_ws = self.synthesis.num_ws
        self.mapping = MappingNetwork(
            z_dim=z_dim, c_dim=c_dim, w_dim=w_dim, num_ws=self.num_ws, **mapping_kwargs
        )

    def forward(
        self,
        img,
        c,
        fname=None,
        truncation_psi=1,
        truncation_cutoff=None,
        **synthesis_kwargs,
    ):
        # NOTE(review): the LAST input channel is treated as the mask here,
        # while the caller packs `0.5 - mask` as the FIRST channel — confirm
        # this is the intended channel.
        mask = img[:, -1].unsqueeze(1)
        x_global, z, feats = self.encoder(img, c)
        ws = self.mapping(
            z, c, truncation_psi=truncation_psi, truncation_cutoff=truncation_cutoff
        )
        return self.synthesis(
            x_global, mask, feats, ws, fname=fname, **synthesis_kwargs
        )
# Pretrained FcF (Places 512) generator checkpoint; both the URL and the MD5
# can be overridden through environment variables.
FCF_MODEL_URL = os.environ.get(
    "FCF_MODEL_URL",
    "https://github.com/Sanster/models/releases/download/add_fcf/places_512_G.pth",
)
# Checksum used to validate the downloaded checkpoint.
FCF_MODEL_MD5 = os.environ.get("FCF_MODEL_MD5", "3323152bc01bf1c56fd8aba74435a211")
class FcF(InpaintModel):
    """FcF inpainting model wrapper.

    The generator only operates on 512x512 inputs, so larger images are
    handled by cropping around each mask region, resizing the crop to 512,
    inpainting, and pasting the result back.
    """

    name = "fcf"
    # The network takes fixed 512x512 inputs: pad up to a 512-multiple square.
    min_size = 512
    pad_mod = 512
    pad_to_square = True

    def init_model(self, device, **kwargs):
        """Build the generator, load pretrained weights, make a dummy label."""
        # Fix all RNG seeds and cuDNN modes for reproducible behavior.
        seed = 0
        random.seed(seed)
        np.random.seed(seed)
        torch.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False

        # NOTE(review): the caller-supplied **kwargs are discarded here and
        # replaced by the fixed architecture config below.
        kwargs = {
            "channel_base": 1 * 32768,
            "channel_max": 512,
            "num_fp16_res": 4,
            "conv_clamp": 256,
        }
        G = Generator(
            z_dim=512,
            c_dim=0,
            w_dim=512,
            img_resolution=512,
            img_channels=3,
            synthesis_kwargs=kwargs,
            encoder_kwargs=kwargs,
            mapping_kwargs={"num_layers": 2},
        )
        self.model = load_model(G, FCF_MODEL_URL, device, FCF_MODEL_MD5)
        # c_dim == 0, so this is an empty (unconditional) label tensor.
        self.label = torch.zeros([1, self.model.c_dim], device=device)

    @staticmethod
    def is_downloaded() -> bool:
        # True when the checkpoint already exists in the local cache.
        return os.path.exists(get_cache_path_by_url(FCF_MODEL_URL))

    @torch.no_grad()
    def __call__(self, image, mask, config: Config):
        """
        images: [H, W, C] RGB, not normalized
        masks: [H, W]
        return: BGR IMAGE
        """
        # Exactly 512x512 images go through in a single pass.
        if image.shape[0] == 512 and image.shape[1] == 512:
            return self._pad_forward(image, mask, config)

        # Otherwise inpaint each masked region on its own 512-limited crop.
        boxes = boxes_from_mask(mask)
        crop_result = []
        # NOTE(review): mutates the caller's config object — confirm intended.
        config.hd_strategy_crop_margin = 128
        for box in boxes:
            crop_image, crop_mask, crop_box = self._crop_box(image, mask, box, config)
            origin_size = crop_image.shape[:2]
            resize_image = resize_max_size(crop_image, size_limit=512)
            resize_mask = resize_max_size(crop_mask, size_limit=512)
            inpaint_result = self._pad_forward(resize_image, resize_mask, config)

            # only paste masked area result
            inpaint_result = cv2.resize(
                inpaint_result,
                (origin_size[1], origin_size[0]),
                interpolation=cv2.INTER_CUBIC,
            )

            # Keep the original (channel-reversed) pixels where the mask is off.
            original_pixel_indices = crop_mask < 127
            inpaint_result[original_pixel_indices] = crop_image[:, :, ::-1][
                original_pixel_indices
            ]

            crop_result.append((inpaint_result, crop_box))

        # NOTE(review): image[:, :, ::-1] is a numpy view of the input array,
        # so the paste loop below writes through into the caller's `image`.
        inpaint_result = image[:, :, ::-1]
        for crop_image, crop_box in crop_result:
            x1, y1, x2, y2 = crop_box
            inpaint_result[y1:y2, x1:x2, :] = crop_image

        return inpaint_result

    def forward(self, image, mask, config: Config):
        """Input images and output images have same size
        images: [H, W, C] RGB
        masks: [H, W] mask area == 255
        return: BGR IMAGE
        """
        image = norm_img(image)  # [0, 1]
        image = image * 2 - 1  # [0, 1] -> [-1, 1]
        # Binarize the mask, then normalize it to [0, 1].
        mask = (mask > 120) * 255
        mask = norm_img(mask)

        image = torch.from_numpy(image).unsqueeze(0).to(self.device)
        mask = torch.from_numpy(mask).unsqueeze(0).to(self.device)

        # Zero out masked pixels; the mask channel is shifted to [-0.5, 0.5].
        erased_img = image * (1 - mask)
        input_image = torch.cat([0.5 - mask, erased_img], dim=1)

        output = self.model(
            input_image, self.label, truncation_psi=0.1, noise_mode="none"
        )
        # [-1, 1] float NCHW -> [0, 255] uint8 NHWC.
        output = (
            (output.permute(0, 2, 3, 1) * 127.5 + 127.5)
            .round()
            .clamp(0, 255)
            .to(torch.uint8)
        )
        output = output[0].cpu().numpy()
        cur_res = cv2.cvtColor(output, cv2.COLOR_RGB2BGR)
        return cur_res
| 57,098 | 31.929066 | 124 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/model/sd.py | import gc
import PIL.Image
import cv2
import numpy as np
import torch
from loguru import logger
from lama_cleaner.model.base import DiffusionInpaintModel
from lama_cleaner.model.utils import torch_gc, get_scheduler
from lama_cleaner.schema import Config
class CPUTextEncoderWrapper:
    """Keeps the Stable Diffusion text encoder on CPU (in fp32) to save GPU
    memory, while reporting the dtype the rest of the pipeline expects."""

    def __init__(self, text_encoder, torch_dtype):
        self.config = text_encoder.config
        # Move to CPU and widen to fp32 (fp16 ops are not supported on CPU).
        self.text_encoder = text_encoder.to(torch.device("cpu"), non_blocking=True)
        self.text_encoder = self.text_encoder.to(torch.float32, non_blocking=True)
        self.torch_dtype = torch_dtype
        del text_encoder
        torch_gc()

    def __call__(self, x, **kwargs):
        input_device = x.device
        # Encode on CPU, then move back to the caller's device/dtype. The
        # list wrapper mimics the tuple-like output of the real encoder.
        hidden = self.text_encoder(x.to(self.text_encoder.device), **kwargs)[0]
        return [hidden.to(input_device).to(self.torch_dtype)]

    @property
    def dtype(self):
        return self.torch_dtype
def load_from_local_model(local_model_path, torch_dtype, disable_nsfw=True):
    """Convert a single-file SD checkpoint (.ckpt / .safetensors) into a
    diffusers ``StableDiffusionInpaintPipeline``.

    Args:
        local_model_path: path to the checkpoint file on disk.
        torch_dtype: dtype the returned pipeline is cast to.
        disable_nsfw: when True, strip the safety checker and feature extractor.

    Returns:
        A ``StableDiffusionInpaintPipeline`` moved to ``torch_dtype``.
    """
    from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
        download_from_original_stable_diffusion_ckpt,
    )
    from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline

    logger.info(f"Converting {local_model_path} to diffusers pipeline")

    pipe = download_from_original_stable_diffusion_ckpt(
        local_model_path,
        # Inpainting UNet input: 4 latent + 4 masked-image latent + 1 mask channels.
        num_in_channels=9,
        from_safetensors=local_model_path.endswith("safetensors"),
        device="cpu",
    )

    inpaint_pipe = StableDiffusionInpaintPipeline(
        vae=pipe.vae,
        text_encoder=pipe.text_encoder,
        tokenizer=pipe.tokenizer,
        unet=pipe.unet,
        scheduler=pipe.scheduler,
        safety_checker=None if disable_nsfw else pipe.safety_checker,
        # Bug fix: previously passed ``pipe.safety_checker`` here; the feature
        # extractor (CLIP image processor) is what must feed the safety checker.
        feature_extractor=None if disable_nsfw else pipe.feature_extractor,
        requires_safety_checker=not disable_nsfw,
    )
    # Drop the intermediate pipeline promptly to cut peak memory.
    del pipe
    gc.collect()
    return inpaint_pipe.to(torch_dtype=torch_dtype)
class SD(DiffusionInpaintModel):
    # Inputs are padded so H/W are multiples of 8 (the VAE latent downscale factor).
    pad_mod = 8
    min_size = 512

    def init_model(self, device: torch.device, **kwargs):
        """Build the diffusers inpainting pipeline according to CLI kwargs.

        Expects at least ``sd_run_local``, ``disable_nsfw``, ``hf_access_token``
        and ``sd_cpu_textencoder`` in ``kwargs`` (accessed with ``[]``).
        """
        from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline

        fp16 = not kwargs.get("no_half", False)

        model_kwargs = {
            "local_files_only": kwargs.get("local_files_only", kwargs["sd_run_local"])
        }
        if kwargs["disable_nsfw"] or kwargs.get("cpu_offload", False):
            logger.info("Disable Stable Diffusion Model NSFW checker")
            model_kwargs.update(
                dict(
                    safety_checker=None,
                    feature_extractor=None,
                    requires_safety_checker=False,
                )
            )

        use_gpu = device == torch.device("cuda") and torch.cuda.is_available()
        # Half precision only when actually running on GPU and not disabled.
        torch_dtype = torch.float16 if use_gpu and fp16 else torch.float32

        if kwargs.get("sd_local_model_path", None):
            # Single-file checkpoint on disk -> convert to a diffusers pipeline.
            self.model = load_from_local_model(
                kwargs["sd_local_model_path"],
                torch_dtype=torch_dtype,
            )
        else:
            self.model = StableDiffusionInpaintPipeline.from_pretrained(
                self.model_id_or_path,
                revision="fp16" if use_gpu and fp16 else "main",
                torch_dtype=torch_dtype,
                use_auth_token=kwargs["hf_access_token"],
                **model_kwargs,
            )

        # https://huggingface.co/docs/diffusers/v0.7.0/en/api/pipelines/stable_diffusion#diffusers.StableDiffusionInpaintPipeline.enable_attention_slicing
        self.model.enable_attention_slicing()
        # https://huggingface.co/docs/diffusers/v0.7.0/en/optimization/fp16#memory-efficient-attention
        if kwargs.get("enable_xformers", False):
            self.model.enable_xformers_memory_efficient_attention()

        if kwargs.get("cpu_offload", False) and use_gpu:
            # TODO: gpu_id
            logger.info("Enable sequential cpu offload")
            self.model.enable_sequential_cpu_offload(gpu_id=0)
        else:
            self.model = self.model.to(device)
            if kwargs["sd_cpu_textencoder"]:
                # Keep only the text encoder on CPU to save VRAM.
                logger.info("Run Stable Diffusion TextEncoder on CPU")
                self.model.text_encoder = CPUTextEncoderWrapper(
                    self.model.text_encoder, torch_dtype
                )

        self.callback = kwargs.pop("callback", None)

    def forward(self, image, mask, config: Config):
        """Input image and output image have same size
        image: [H, W, C] RGB
        mask: [H, W, 1] 255 means area to repaint
        return: BGR IMAGE
        """
        # Swap in the sampler requested for this particular request.
        scheduler_config = self.model.scheduler.config
        scheduler = get_scheduler(config.sd_sampler, scheduler_config)
        self.model.scheduler = scheduler

        if config.sd_mask_blur != 0:
            # Feather the mask edge; Gaussian kernel size must be odd.
            k = 2 * config.sd_mask_blur + 1
            mask = cv2.GaussianBlur(mask, (k, k), 0)[:, :, np.newaxis]

        img_h, img_w = image.shape[:2]

        output = self.model(
            image=PIL.Image.fromarray(image),
            prompt=config.prompt,
            negative_prompt=config.negative_prompt,
            mask_image=PIL.Image.fromarray(mask[:, :, -1], mode="L"),
            num_inference_steps=config.sd_steps,
            guidance_scale=config.sd_guidance_scale,
            output_type="np.array",
            callback=self.callback,
            height=img_h,
            width=img_w,
            generator=torch.manual_seed(config.sd_seed),
        ).images[0]

        # [0, 1] float -> [0, 255] uint8, then RGB -> BGR for the API contract.
        output = (output * 255).round().astype("uint8")
        output = cv2.cvtColor(output, cv2.COLOR_RGB2BGR)
        return output

    def forward_post_process(self, result, image, mask, config):
        # Optionally match the result's color histogram to the original image.
        if config.sd_match_histograms:
            result = self._match_histograms(result, image[:, :, ::-1], mask)

        if config.sd_mask_blur != 0:
            k = 2 * config.sd_mask_blur + 1
            mask = cv2.GaussianBlur(mask, (k, k), 0)
        return result, image, mask

    @staticmethod
    def is_downloaded() -> bool:
        # model will be downloaded when app start, and can't switch in frontend settings
        return True
class SD15(SD):
    # Stable Diffusion 1.5 inpainting checkpoint (RunwayML).
    name = "sd1.5"
    model_id_or_path = "runwayml/stable-diffusion-inpainting"
class Anything4(SD):
    # Anything v4 (anime-style) inpainting checkpoint.
    name = "anything4"
    model_id_or_path = "Sanster/anything-4.0-inpainting"
class RealisticVision14(SD):
    # Realistic Vision V1.4 (photorealistic) inpainting checkpoint.
    name = "realisticVision1.4"
    model_id_or_path = "Sanster/Realistic_Vision_V1.4-inpainting"
class SD2(SD):
    # Stable Diffusion 2 inpainting checkpoint (StabilityAI).
    name = "sd2"
    model_id_or_path = "stabilityai/stable-diffusion-2-inpainting"
| 6,644 | 33.252577 | 154 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/model/pipeline/__init__.py | from .pipeline_stable_diffusion_controlnet_inpaint import (
StableDiffusionControlNetInpaintPipeline,
)
| 108 | 26.25 | 59 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/model/pipeline/pipeline_stable_diffusion_controlnet_inpaint.py | # Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copy from https://github.com/mikonvergence/ControlNetInpaint/blob/main/src/pipeline_stable_diffusion_controlnet_inpaint.py
import torch
import PIL.Image
import numpy as np
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import *
EXAMPLE_DOC_STRING = """
Examples:
```py
>>> # !pip install opencv-python transformers accelerate
>>> from diffusers import StableDiffusionControlNetInpaintPipeline, ControlNetModel, UniPCMultistepScheduler
>>> from diffusers.utils import load_image
>>> import numpy as np
>>> import torch
>>> import cv2
>>> from PIL import Image
>>> # download an image
>>> image = load_image(
... "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
... )
>>> image = np.array(image)
>>> mask_image = load_image(
... "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"
... )
>>> mask_image = np.array(mask_image)
>>> # get canny image
>>> canny_image = cv2.Canny(image, 100, 200)
>>> canny_image = canny_image[:, :, None]
>>> canny_image = np.concatenate([canny_image, canny_image, canny_image], axis=2)
>>> canny_image = Image.fromarray(canny_image)
>>> # load control net and stable diffusion v1-5
>>> controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
>>> pipe = StableDiffusionControlNetInpaintPipeline.from_pretrained(
... "runwayml/stable-diffusion-inpainting", controlnet=controlnet, torch_dtype=torch.float16
... )
>>> # speed up diffusion process with faster scheduler and memory optimization
>>> pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
>>> # remove following line if xformers is not installed
>>> pipe.enable_xformers_memory_efficient_attention()
>>> pipe.enable_model_cpu_offload()
>>> # generate image
>>> generator = torch.manual_seed(0)
>>> image = pipe(
... "futuristic-looking doggo",
... num_inference_steps=20,
... generator=generator,
... image=image,
... control_image=canny_image,
... mask_image=mask_image
... ).images[0]
```
"""
def prepare_mask_and_masked_image(image, mask):
    """
    Prepares a pair (image, mask) to be consumed by the Stable Diffusion pipeline. This means that those inputs will be
    converted to ``torch.Tensor`` with shapes ``batch x channels x height x width`` where ``channels`` is ``3`` for the
    ``image`` and ``1`` for the ``mask``.

    The ``image`` will be converted to ``torch.float32`` and normalized to be in ``[-1, 1]``. The ``mask`` will be
    binarized (``mask > 0.5``) and cast to ``torch.float32`` too.

    Args:
        image (Union[np.array, PIL.Image, torch.Tensor]): The image to inpaint.
            It can be a ``PIL.Image``, or a ``height x width x 3`` ``np.array`` or a ``channels x height x width``
            ``torch.Tensor`` or a ``batch x channels x height x width`` ``torch.Tensor``.
        mask (_type_): The mask to apply to the image, i.e. regions to inpaint.
            It can be a ``PIL.Image``, or a ``height x width`` ``np.array`` or a ``1 x height x width``
            ``torch.Tensor`` or a ``batch x 1 x height x width`` ``torch.Tensor``.

    Raises:
        ValueError: ``torch.Tensor`` images should be in the ``[-1, 1]`` range. ValueError: ``torch.Tensor`` mask
        should be in the ``[0, 1]`` range. ValueError: ``mask`` and ``image`` should have the same spatial dimensions.
        TypeError: ``mask`` is a ``torch.Tensor`` but ``image`` is not
        (or the other way around).

    Returns:
        tuple[torch.Tensor]: The pair (mask, masked_image) as ``torch.Tensor`` with 4
        dimensions: ``batch x channels x height x width``.
    """
    if isinstance(image, torch.Tensor):
        # Tensor path: both inputs must be tensors; validate shapes/ranges.
        if not isinstance(mask, torch.Tensor):
            raise TypeError(
                f"`image` is a torch.Tensor but `mask` (type: {type(mask)} is not"
            )

        # Batch single image
        if image.ndim == 3:
            assert (
                image.shape[0] == 3
            ), "Image outside a batch should be of shape (3, H, W)"
            image = image.unsqueeze(0)

        # Batch and add channel dim for single mask
        if mask.ndim == 2:
            mask = mask.unsqueeze(0).unsqueeze(0)

        # Batch single mask or add channel dim
        if mask.ndim == 3:
            # Single batched mask, no channel dim or single mask not batched but channel dim
            if mask.shape[0] == 1:
                mask = mask.unsqueeze(0)

            # Batched masks no channel dim
            else:
                mask = mask.unsqueeze(1)

        assert (
            image.ndim == 4 and mask.ndim == 4
        ), "Image and Mask must have 4 dimensions"
        assert (
            image.shape[-2:] == mask.shape[-2:]
        ), "Image and Mask must have the same spatial dimensions"
        assert (
            image.shape[0] == mask.shape[0]
        ), "Image and Mask must have the same batch size"

        # Check image is in [-1, 1]
        if image.min() < -1 or image.max() > 1:
            raise ValueError("Image should be in [-1, 1] range")

        # Check mask is in [0, 1]
        if mask.min() < 0 or mask.max() > 1:
            raise ValueError("Mask should be in [0, 1] range")

        # Binarize mask
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1

        # Image as float32
        image = image.to(dtype=torch.float32)
    elif isinstance(mask, torch.Tensor):
        raise TypeError(
            f"`mask` is a torch.Tensor but `image` (type: {type(image)} is not"
        )
    else:
        # PIL / numpy path: normalize to batched NCHW float tensors.
        # preprocess image
        if isinstance(image, (PIL.Image.Image, np.ndarray)):
            image = [image]

        if isinstance(image, list) and isinstance(image[0], PIL.Image.Image):
            image = [np.array(i.convert("RGB"))[None, :] for i in image]
            image = np.concatenate(image, axis=0)
        elif isinstance(image, list) and isinstance(image[0], np.ndarray):
            image = np.concatenate([i[None, :] for i in image], axis=0)

        # HWC -> CHW, then scale uint8 [0, 255] to float [-1, 1].
        image = image.transpose(0, 3, 1, 2)
        image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0

        # preprocess mask
        if isinstance(mask, (PIL.Image.Image, np.ndarray)):
            mask = [mask]

        if isinstance(mask, list) and isinstance(mask[0], PIL.Image.Image):
            mask = np.concatenate(
                [np.array(m.convert("L"))[None, None, :] for m in mask], axis=0
            )
            mask = mask.astype(np.float32) / 255.0
        elif isinstance(mask, list) and isinstance(mask[0], np.ndarray):
            mask = np.concatenate([m[None, None, :] for m in mask], axis=0)

        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask)

    # Keep only the unmasked pixels; masked regions are zeroed out.
    masked_image = image * (mask < 0.5)

    return mask, masked_image
class StableDiffusionControlNetInpaintPipeline(StableDiffusionControlNetPipeline):
r"""
Pipeline for text-guided image inpainting using Stable Diffusion with ControlNet guidance.
This model inherits from [`StableDiffusionControlNetPipeline`]. Check the superclass documentation for the generic methods the
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
Args:
vae ([`AutoencoderKL`]):
Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
text_encoder ([`CLIPTextModel`]):
Frozen text-encoder. Stable Diffusion uses the text portion of
[CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
tokenizer (`CLIPTokenizer`):
Tokenizer of class
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
controlnet ([`ControlNetModel`]):
Provides additional conditioning to the unet during the denoising process
scheduler ([`SchedulerMixin`]):
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
safety_checker ([`StableDiffusionSafetyChecker`]):
Classification module that estimates whether generated images could be considered offensive or harmful.
Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
feature_extractor ([`CLIPFeatureExtractor`]):
Model that extracts features from generated images to be used as inputs for the `safety_checker`.
"""
    def prepare_mask_latents(
        self,
        mask,
        masked_image,
        batch_size,
        height,
        width,
        dtype,
        device,
        generator,
        do_classifier_free_guidance,
    ):
        """Downscale ``mask`` to latent resolution and VAE-encode
        ``masked_image``, duplicating both to ``batch_size`` (and doubling
        for classifier-free guidance) so they can be concatenated with the
        denoising latents."""
        # resize the mask to latents shape as we concatenate the mask to the latents
        # we do that before converting to dtype to avoid breaking in case we're using cpu_offload
        # and half precision
        mask = torch.nn.functional.interpolate(
            mask, size=(height // self.vae_scale_factor, width // self.vae_scale_factor)
        )
        mask = mask.to(device=device, dtype=dtype)

        masked_image = masked_image.to(device=device, dtype=dtype)

        # encode the mask image into latents space so we can concatenate it to the latents
        if isinstance(generator, list):
            # One generator per sample keeps per-image sampling reproducible.
            masked_image_latents = [
                self.vae.encode(masked_image[i : i + 1]).latent_dist.sample(
                    generator=generator[i]
                )
                for i in range(batch_size)
            ]
            masked_image_latents = torch.cat(masked_image_latents, dim=0)
        else:
            masked_image_latents = self.vae.encode(masked_image).latent_dist.sample(
                generator=generator
            )
        masked_image_latents = self.vae.config.scaling_factor * masked_image_latents

        # duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method
        if mask.shape[0] < batch_size:
            if not batch_size % mask.shape[0] == 0:
                raise ValueError(
                    "The passed mask and the required batch size don't match. Masks are supposed to be duplicated to"
                    f" a total batch size of {batch_size}, but {mask.shape[0]} masks were passed. Make sure the number"
                    " of masks that you pass is divisible by the total requested batch size."
                )
            mask = mask.repeat(batch_size // mask.shape[0], 1, 1, 1)
        if masked_image_latents.shape[0] < batch_size:
            if not batch_size % masked_image_latents.shape[0] == 0:
                raise ValueError(
                    "The passed images and the required batch size don't match. Images are supposed to be duplicated"
                    f" to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed."
                    " Make sure the number of images that you pass is divisible by the total requested batch size."
                )
            masked_image_latents = masked_image_latents.repeat(
                batch_size // masked_image_latents.shape[0], 1, 1, 1
            )

        # Double the batch for CFG (unconditional + conditional passes).
        mask = torch.cat([mask] * 2) if do_classifier_free_guidance else mask
        masked_image_latents = (
            torch.cat([masked_image_latents] * 2)
            if do_classifier_free_guidance
            else masked_image_latents
        )

        # aligning device to prevent device errors when concating it with the latent model input
        masked_image_latents = masked_image_latents.to(device=device, dtype=dtype)
        return mask, masked_image_latents
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        prompt: Union[str, List[str]] = None,
        image: Union[torch.FloatTensor, PIL.Image.Image] = None,
        control_image: Union[
            torch.FloatTensor,
            PIL.Image.Image,
            List[torch.FloatTensor],
            List[PIL.Image.Image],
        ] = None,
        mask_image: Union[torch.FloatTensor, PIL.Image.Image] = None,
        height: Optional[int] = None,
        width: Optional[int] = None,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        controlnet_conditioning_scale: float = 1.0,
    ):
        r"""
        Function invoked when calling the pipeline for generation.

        Args:
            prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`.
                instead.
            image (`PIL.Image.Image`):
                `Image`, or tensor representing an image batch which will be inpainted, *i.e.* parts of the image will
                be masked out with `mask_image` and repainted according to `prompt`.
            control_image (`torch.FloatTensor`, `PIL.Image.Image`, `List[torch.FloatTensor]` or `List[PIL.Image.Image]`):
                The ControlNet input condition. ControlNet uses this input condition to generate guidance to Unet. If
                the type is specified as `Torch.FloatTensor`, it is passed to ControlNet as is. PIL.Image.Image` can
                also be accepted as an image. The control image is automatically resized to fit the output image.
            mask_image (`PIL.Image.Image`):
                `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be
                repainted, while black pixels will be preserved. If `mask_image` is a PIL image, it will be converted
                to a single channel (luminance) before use. If it's a tensor, it should contain one color channel (L)
                instead of 3, so the expected shape would be `(B, H, W, 1)`.
            height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
                The height in pixels of the generated image.
            width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
                The width in pixels of the generated image.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            guidance_scale (`float`, *optional*, defaults to 7.5):
                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
                `guidance_scale` is defined as `w` of equation 2. of [Imagen
                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
                1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
                usually at the expense of lower image quality.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds`. instead. If not defined, one has to pass `negative_prompt_embeds`. instead.
                Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`).
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
                [`schedulers.DDIMScheduler`], will be ignored for others.
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
                to make generation deterministic.
            latents (`torch.FloatTensor`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                tensor will ge generated by sampling using the supplied random `generator`.
            prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generate image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
                plain tuple.
            callback (`Callable`, *optional*):
                A function that will be called every `callback_steps` steps during inference. The function will be
                called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
            callback_steps (`int`, *optional*, defaults to 1):
                The frequency at which the `callback` function will be called. If not specified, the callback will be
                called at every step.
            cross_attention_kwargs (`dict`, *optional*):
                A kwargs dictionary that if specified is passed along to the `AttnProcessor` as defined under
                `self.processor` in
                [diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).
            controlnet_conditioning_scale (`float`, *optional*, defaults to 1.0):
                The outputs of the controlnet are multiplied by `controlnet_conditioning_scale` before they are added
                to the residual in the original unet.

        Examples:

        Returns:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple.
            When returning a tuple, the first element is a list with the generated images, and the second element is a
            list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
            (nsfw) content, according to the `safety_checker`.
        """
        # 0. Default height and width to unet
        height, width = self._default_height_width(height, width, control_image)

        # 1. Check inputs. Raise error if not correct
        self.check_inputs(
            prompt,
            control_image,
            height,
            width,
            callback_steps,
            negative_prompt,
            prompt_embeds,
            negative_prompt_embeds,
        )

        # 2. Define call parameters
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        device = self._execution_device
        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0

        # 3. Encode input prompt
        prompt_embeds = self._encode_prompt(
            prompt,
            device,
            num_images_per_prompt,
            do_classifier_free_guidance,
            negative_prompt,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
        )

        # 4. Prepare image
        control_image = self.prepare_image(
            control_image,
            width,
            height,
            batch_size * num_images_per_prompt,
            num_images_per_prompt,
            device,
            self.controlnet.dtype,
        )

        if do_classifier_free_guidance:
            # Control image must match the doubled (uncond + cond) batch.
            control_image = torch.cat([control_image] * 2)

        # 5. Prepare timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        # 6. Prepare latent variables
        num_channels_latents = self.controlnet.config.in_channels
        latents = self.prepare_latents(
            batch_size * num_images_per_prompt,
            num_channels_latents,
            height,
            width,
            prompt_embeds.dtype,
            device,
            generator,
            latents,
        )

        # EXTRA: prepare mask latents
        mask, masked_image = prepare_mask_and_masked_image(image, mask_image)
        mask, masked_image_latents = self.prepare_mask_latents(
            mask,
            masked_image,
            batch_size * num_images_per_prompt,
            height,
            width,
            prompt_embeds.dtype,
            device,
            generator,
            do_classifier_free_guidance,
        )

        # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
        extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)

        # 8. Denoising loop
        num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                # expand the latents if we are doing classifier free guidance
                latent_model_input = (
                    torch.cat([latents] * 2) if do_classifier_free_guidance else latents
                )
                latent_model_input = self.scheduler.scale_model_input(
                    latent_model_input, t
                )

                # ControlNet sees only the 4-channel latents (before the
                # inpainting mask channels are concatenated below).
                down_block_res_samples, mid_block_res_sample = self.controlnet(
                    latent_model_input,
                    t,
                    encoder_hidden_states=prompt_embeds,
                    controlnet_cond=control_image,
                    return_dict=False,
                )

                down_block_res_samples = [
                    down_block_res_sample * controlnet_conditioning_scale
                    for down_block_res_sample in down_block_res_samples
                ]
                mid_block_res_sample *= controlnet_conditioning_scale

                # predict the noise residual
                latent_model_input = torch.cat(
                    [latent_model_input, mask, masked_image_latents], dim=1
                )
                noise_pred = self.unet(
                    latent_model_input,
                    t,
                    encoder_hidden_states=prompt_embeds,
                    cross_attention_kwargs=cross_attention_kwargs,
                    down_block_additional_residuals=down_block_res_samples,
                    mid_block_additional_residual=mid_block_res_sample,
                ).sample

                # perform guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (
                        noise_pred_text - noise_pred_uncond
                    )

                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(
                    noise_pred, t, latents, **extra_step_kwargs
                ).prev_sample

                # call the callback, if provided
                if i == len(timesteps) - 1 or (
                    (i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0
                ):
                    progress_bar.update()
                    if callback is not None and i % callback_steps == 0:
                        callback(i, t, latents)

        # If we do sequential model offloading, let's offload unet and controlnet
        # manually for max memory savings
        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
            self.unet.to("cpu")
            self.controlnet.to("cpu")
            torch.cuda.empty_cache()

        if output_type == "latent":
            image = latents
            has_nsfw_concept = None
        elif output_type == "pil":
            # 8. Post-processing
            image = self.decode_latents(latents)

            # 9. Run safety checker
            image, has_nsfw_concept = self.run_safety_checker(
                image, device, prompt_embeds.dtype
            )

            # 10. Convert to PIL
            image = self.numpy_to_pil(image)
        else:
            # 8. Post-processing
            image = self.decode_latents(latents)

            # 9. Run safety checker
            image, has_nsfw_concept = self.run_safety_checker(
                image, device, prompt_embeds.dtype
            )

        # Offload last model to CPU
        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
            self.final_offload_hook.offload()

        if not return_dict:
            return (image, has_nsfw_concept)

        return StableDiffusionPipelineOutput(
            images=image, nsfw_content_detected=has_nsfw_concept
        )
| 28,155 | 47.047782 | 146 | py |
pygcn | pygcn-master/setup.py | from setuptools import setup
from setuptools import find_packages
# setuptools packaging metadata for the pygcn distribution.
setup(name='pygcn',
      version='0.1',
      description='Graph Convolutional Networks in PyTorch',
      author='Thomas Kipf',
      author_email='thomas.kipf@gmail.com',
      url='https://tkipf.github.io',
      download_url='https://github.com/tkipf/pygcn',
      license='MIT',
      install_requires=['numpy',
                        'torch',
                        'scipy'
                        ],
      package_data={'pygcn': ['README.md']},
      packages=find_packages())
pygcn | pygcn-master/pygcn/utils.py | import numpy as np
import scipy.sparse as sp
import torch
def encode_onehot(labels):
    """One-hot encode an iterable of class labels.

    Classes are sorted so the label -> column mapping is deterministic
    across runs; iterating a plain ``set`` has hash-randomized order,
    which made the encoding (and downstream class indices) unstable.

    Args:
        labels: iterable of hashable, sortable class labels, length N.

    Returns:
        (N, num_classes) int32 array; row i is the one-hot vector of labels[i].
    """
    classes = sorted(set(labels))
    classes_dict = {c: np.identity(len(classes))[i, :] for i, c in
                    enumerate(classes)}
    labels_onehot = np.array(list(map(classes_dict.get, labels)),
                             dtype=np.int32)
    return labels_onehot
def load_data(path="../data/cora/", dataset="cora"):
    """Load citation network dataset (cora only for now)

    Reads ``<path><dataset>.content`` (node id, features, label) and
    ``<path><dataset>.cites`` (directed citation edges), builds a symmetric
    row-normalized adjacency matrix with self-loops, and returns torch
    tensors plus fixed train/val/test index splits.
    """
    print('Loading {} dataset...'.format(dataset))

    idx_features_labels = np.genfromtxt("{}{}.content".format(path, dataset),
                                        dtype=np.dtype(str))
    features = sp.csr_matrix(idx_features_labels[:, 1:-1], dtype=np.float32)
    labels = encode_onehot(idx_features_labels[:, -1])

    # build graph
    # Map raw paper ids to contiguous row indices.
    idx = np.array(idx_features_labels[:, 0], dtype=np.int32)
    idx_map = {j: i for i, j in enumerate(idx)}
    edges_unordered = np.genfromtxt("{}{}.cites".format(path, dataset),
                                    dtype=np.int32)
    edges = np.array(list(map(idx_map.get, edges_unordered.flatten())),
                     dtype=np.int32).reshape(edges_unordered.shape)
    adj = sp.coo_matrix((np.ones(edges.shape[0]), (edges[:, 0], edges[:, 1])),
                        shape=(labels.shape[0], labels.shape[0]),
                        dtype=np.float32)

    # build symmetric adjacency matrix
    adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)

    features = normalize(features)
    # Add self-loops before normalizing, as in the GCN paper.
    adj = normalize(adj + sp.eye(adj.shape[0]))

    # Fixed split sizes match the standard Cora semi-supervised setup.
    idx_train = range(140)
    idx_val = range(200, 500)
    idx_test = range(500, 1500)

    features = torch.FloatTensor(np.array(features.todense()))
    labels = torch.LongTensor(np.where(labels)[1])
    adj = sparse_mx_to_torch_sparse_tensor(adj)

    idx_train = torch.LongTensor(idx_train)
    idx_val = torch.LongTensor(idx_val)
    idx_test = torch.LongTensor(idx_test)

    return adj, features, labels, idx_train, idx_val, idx_test
def normalize(mx):
    """Row-normalize sparse matrix"""
    # Divide each row by its sum; rows summing to zero are left as zeros.
    row_sums = np.array(mx.sum(1)).flatten()
    inv = np.power(row_sums, -1)
    inv[np.isinf(inv)] = 0.
    return sp.diags(inv).dot(mx)
def accuracy(output, labels):
    """Fraction of rows whose argmax prediction equals ``labels``."""
    predictions = output.max(1)[1].type_as(labels)
    num_correct = predictions.eq(labels).double().sum()
    return num_correct / len(labels)
def sparse_mx_to_torch_sparse_tensor(sparse_mx):
    """Convert a scipy sparse matrix to a torch sparse COO tensor.

    Args:
        sparse_mx: any scipy sparse matrix; converted to float32 COO form.

    Returns:
        torch sparse float32 tensor with the same shape and entries.
    """
    sparse_mx = sparse_mx.tocoo().astype(np.float32)
    indices = torch.from_numpy(
        np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))
    values = torch.from_numpy(sparse_mx.data)
    shape = torch.Size(sparse_mx.shape)
    # torch.sparse.FloatTensor is deprecated; torch.sparse_coo_tensor is the
    # supported constructor and yields an equivalent float32 COO tensor.
    return torch.sparse_coo_tensor(indices, values, shape)
| 2,848 | 34.17284 | 78 | py |
pygcn | pygcn-master/pygcn/layers.py | import math
import torch
from torch.nn.parameter import Parameter
from torch.nn.modules.module import Module
class GraphConvolution(Module):
    """
    Simple GCN layer, similar to https://arxiv.org/abs/1609.02907

    Computes ``adj @ (input @ W) (+ b)`` where ``adj`` is a (sparse)
    normalized adjacency matrix.
    """

    def __init__(self, in_features, out_features, bias=True):
        super(GraphConvolution, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = Parameter(torch.FloatTensor(in_features, out_features))
        if bias:
            self.bias = Parameter(torch.FloatTensor(out_features))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        # Uniform init scaled by 1/sqrt(fan_out), as in the reference code.
        bound = 1. / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-bound, bound)
        if self.bias is not None:
            self.bias.data.uniform_(-bound, bound)

    def forward(self, input, adj):
        # Project node features, then aggregate over neighbors via sparse matmul.
        projected = torch.mm(input, self.weight)
        aggregated = torch.spmm(adj, projected)
        if self.bias is None:
            return aggregated
        return aggregated + self.bias

    def __repr__(self):
        return '{} ({} -> {})'.format(
            self.__class__.__name__, self.in_features, self.out_features)
| 1,297 | 29.186047 | 77 | py |
pygcn | pygcn-master/pygcn/models.py | import torch.nn as nn
import torch.nn.functional as F
from pygcn.layers import GraphConvolution
class GCN(nn.Module):
    """Two-layer GCN: GraphConv -> ReLU -> dropout -> GraphConv -> log-softmax."""

    def __init__(self, nfeat, nhid, nclass, dropout):
        super(GCN, self).__init__()
        self.gc1 = GraphConvolution(nfeat, nhid)
        self.gc2 = GraphConvolution(nhid, nclass)
        # Dropout probability applied between the two graph convolutions.
        self.dropout = dropout

    def forward(self, x, adj):
        hidden = F.relu(self.gc1(x, adj))
        hidden = F.dropout(hidden, self.dropout, training=self.training)
        logits = self.gc2(hidden, adj)
        # Log-probabilities per node, suitable for NLL loss.
        return F.log_softmax(logits, dim=1)
| 541 | 27.526316 | 62 | py |
pygcn | pygcn-master/pygcn/__init__.py | from __future__ import print_function
from __future__ import division
from .layers import *
from .models import *
from .utils import * | 135 | 21.666667 | 37 | py |
pygcn | pygcn-master/pygcn/train.py | from __future__ import division
from __future__ import print_function
import time
import argparse
import numpy as np
import torch
import torch.nn.functional as F
import torch.optim as optim
from pygcn.utils import load_data, accuracy
from pygcn.models import GCN
# Training settings (command-line flags for the full-batch GCN training run).
parser = argparse.ArgumentParser()
parser.add_argument('--no-cuda', action='store_true', default=False,
                    help='Disables CUDA training.')
parser.add_argument('--fastmode', action='store_true', default=False,
                    help='Validate during training pass.')
parser.add_argument('--seed', type=int, default=42, help='Random seed.')
parser.add_argument('--epochs', type=int, default=200,
                    help='Number of epochs to train.')
parser.add_argument('--lr', type=float, default=0.01,
                    help='Initial learning rate.')
parser.add_argument('--weight_decay', type=float, default=5e-4,
                    help='Weight decay (L2 loss on parameters).')
parser.add_argument('--hidden', type=int, default=16,
                    help='Number of hidden units.')
parser.add_argument('--dropout', type=float, default=0.5,
                    help='Dropout rate (1 - keep probability).')
args = parser.parse_args()
# CUDA is used only when available and not explicitly disabled.
args.cuda = not args.no_cuda and torch.cuda.is_available()
# Seed numpy and torch (and CUDA when active) for reproducibility.
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)
# Load data
# Transductive setup: one graph, with train/val/test node index splits.
adj, features, labels, idx_train, idx_val, idx_test = load_data()
# Model and optimizer
# Number of classes is inferred from the largest label id.
model = GCN(nfeat=features.shape[1],
            nhid=args.hidden,
            nclass=labels.max().item() + 1,
            dropout=args.dropout)
optimizer = optim.Adam(model.parameters(),
                       lr=args.lr, weight_decay=args.weight_decay)
if args.cuda:
    # Move the model and every tensor used below onto the GPU.
    model.cuda()
    features = features.cuda()
    adj = adj.cuda()
    labels = labels.cuda()
    idx_train = idx_train.cuda()
    idx_val = idx_val.cuda()
    idx_test = idx_test.cuda()
def train(epoch):
    """Run one full-batch training step and print train/val metrics.

    Uses the module-level globals (model, optimizer, features, adj, labels,
    the index splits and args) set up by the script above.
    """
    t = time.time()
    model.train()
    optimizer.zero_grad()
    output = model(features, adj)
    # Loss/accuracy only on the training nodes (transductive setting).
    loss_train = F.nll_loss(output[idx_train], labels[idx_train])
    acc_train = accuracy(output[idx_train], labels[idx_train])
    loss_train.backward()
    optimizer.step()
    if not args.fastmode:
        # Evaluate validation set performance separately,
        # deactivates dropout during validation run.
        model.eval()
        output = model(features, adj)
    # NOTE: in fastmode the validation metrics below reuse the training-mode
    # forward pass (dropout still active); otherwise the eval-mode output.
    loss_val = F.nll_loss(output[idx_val], labels[idx_val])
    acc_val = accuracy(output[idx_val], labels[idx_val])
    print('Epoch: {:04d}'.format(epoch+1),
          'loss_train: {:.4f}'.format(loss_train.item()),
          'acc_train: {:.4f}'.format(acc_train.item()),
          'loss_val: {:.4f}'.format(loss_val.item()),
          'acc_val: {:.4f}'.format(acc_val.item()),
          'time: {:.4f}s'.format(time.time() - t))
def test():
    """Evaluate the trained model on the held-out test split and print metrics."""
    model.eval()
    out = model(features, adj)
    loss = F.nll_loss(out[idx_test], labels[idx_test])
    acc = accuracy(out[idx_test], labels[idx_test])
    print("Test set results:",
          "loss= {:.4f}".format(loss.item()),
          "accuracy= {:.4f}".format(acc.item()))
# Train model
# Full-batch training: one train() call per epoch, timed overall.
t_total = time.time()
for epoch in range(args.epochs):
    train(epoch)
print("Optimization Finished!")
print("Total time elapsed: {:.4f}s".format(time.time() - t_total))
# Testing
test()
| 3,427 | 31.037383 | 72 | py |
r_em | r_em-master/setup.py | from setuptools import setup, find_packages
from os import path
# Absolute path of the directory containing this setup.py.
_dir = path.abspath(path.dirname(__file__))
# Execute tk_r_em/version.py in this namespace; it is expected to define the
# dunder metadata used below (__version__, __description__, __url__,
# __author__, __author_email__, __license__ and, presumably, __name__ as the
# package name — confirm, since otherwise __name__ would be '__main__').
with open(path.join(_dir, 'tk_r_em', 'version.py')) as f:
    exec(f.read())
setup(
    name=__name__,
    version=__version__,
    description=__description__,
    url=__url__,
    author=__author__,
    author_email=__author_email__,
    license=__license__,
    packages=find_packages(),
    project_urls={
        'Repository': __url__,
    },
    # Runtime dependencies (tensorflow is not listed here).
    install_requires=[
        'numpy',
        'h5py',
        'matplotlib'],
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Science/Research',
        'Topic :: Scientific/Engineering',
        'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Programming Language :: Python :: 3.10',
    ],
    include_package_data=True
) | 1,124 | 26.439024 | 75 | py |
r_em | r_em-master/example_exp_data.py | """
tk_r_em network suites designed to restore different modalities of electron microscopy data
Author: Ivan Lobato
Email: Ivanlh20@gmail.com
"""
import os
import matplotlib
# Check if running on remote SSH and use appropriate backend for matplotlib
remote_ssh = "SSH_CONNECTION" in os.environ
matplotlib.use('Agg' if remote_ssh else 'TkAgg')
import matplotlib.pyplot as plt
def fcn_set_gpu_id(gpu_visible_devices: str = "0") -> None:
    """Pin CUDA to the given comma-separated GPU id string via env vars."""
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = gpu_visible_devices
fcn_set_gpu_id("0")
from tk_r_em import load_network, load_hrstem_exp_test_data
def fcn_inference():
    """
    Run a pre-trained restoration network on experimental HRSTEM data and
    plot the raw and restored images side by side.
    """
    # Any of [sfr_hrsem, sfr_lrsem, sfr_hrstem, sfr_lrstem, sfr_hrtem, sfr_lrtem]
    net_name = 'sfr_hrstem'
    # Experimental hrstem data and the matching pre-trained network.
    x = load_hrstem_exp_test_data('exp_hrstem')
    r_em_nn = load_network(net_name)
    r_em_nn.summary()
    n_data = x.shape[0]
    batch_size = 8
    # Run inference over the whole stack.
    y_p = r_em_nn.predict(x, batch_size)
    fig, axs = plt.subplots(2, n_data, figsize=(48, 6))
    rows = ((x, f"Experimental {net_name} image"),
            (y_p, f"Restored {net_name} image"))
    for col in range(n_data):
        for row, (imgs, label) in enumerate(rows):
            ax = axs[row][col]
            ax.imshow(imgs[col, :, :, 0].squeeze(), cmap='hot')
            ax.set_xticks([])
            ax.set_yticks([])
            ax.grid(False)
            if col == 0:
                ax.set_ylabel(label, fontsize=14)
    fig.subplots_adjust(hspace=2, wspace=10)
    fig.tight_layout()
    if remote_ssh:
        plt.savefig(f"restored_{net_name}.png", format='png')
    else:
        fig.show()
    print('Done')
if __name__ == '__main__':
fcn_inference() | 2,162 | 26.730769 | 116 | py |
r_em | r_em-master/example_sim_data.py | """
tk_r_em network suites designed to restore different modalities of electron microscopy data
Author: Ivan Lobato
Email: Ivanlh20@gmail.com
"""
import os
import matplotlib
# Check if running on remote SSH and use appropriate backend for matplotlib
remote_ssh = "SSH_CONNECTION" in os.environ
matplotlib.use('Agg' if remote_ssh else 'TkAgg')
import matplotlib.pyplot as plt
def fcn_set_gpu_id(gpu_visible_devices: str = "0") -> None:
    """Restrict CUDA to the requested device id(s) before TF is imported."""
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = gpu_visible_devices
fcn_set_gpu_id("0")
from tk_r_em import load_network, load_sim_test_data
def fcn_inference():
    """
    Run a pre-trained restoration network on simulated test data and plot
    the detected, restored and ground-truth images in three rows.
    """
    # Any of [sfr_hrsem, sfr_lrsem, sfr_hrstem, sfr_lrstem, sfr_hrtem, sfr_lrtem]
    net_name = 'sfr_hrstem'
    # Simulated (input, ground-truth) pair and the matching network.
    x, y = load_sim_test_data(net_name)
    r_em_nn = load_network(net_name)
    r_em_nn.summary()
    n_data = x.shape[0]
    batch_size = 8
    # Run inference over the whole stack.
    y_p = r_em_nn.predict(x, batch_size)
    fig, axs = plt.subplots(3, n_data, figsize=(48, 6))
    rows = ((x, f"Detected {net_name} image"),
            (y_p, f"Restored {net_name} image"),
            (y, f"Ground truth {net_name} image"))
    for col in range(n_data):
        for row, (imgs, label) in enumerate(rows):
            ax = axs[row][col]
            ax.imshow(imgs[col, :, :, 0].squeeze(), cmap='viridis')
            ax.set_xticks([])
            ax.set_yticks([])
            ax.grid(False)
            if col == 0:
                ax.set_ylabel(label, fontsize=14)
    fig.subplots_adjust(hspace=2, wspace=10)
    fig.tight_layout()
    if remote_ssh:
        plt.savefig(f"restored_{net_name}.png", format='png')
    else:
        fig.show()
    print('Done')
if __name__ == '__main__':
fcn_inference() | 2,460 | 26.651685 | 116 | py |
r_em | r_em-master/example_sgl_exp_data.py | """
tk_r_em network suites designed to restore different modalities of electron microscopy data
Author: Ivan Lobato
Email: Ivanlh20@gmail.com
"""
import os
import matplotlib
# Check if running on remote SSH and use appropriate backend for matplotlib
remote_ssh = "SSH_CONNECTION" in os.environ
matplotlib.use('Agg' if remote_ssh else 'TkAgg')
import matplotlib.pyplot as plt
def fcn_set_gpu_id(gpu_visible_devices: str = "0") -> None:
    """Select the visible CUDA device(s) through environment variables."""
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = gpu_visible_devices
fcn_set_gpu_id("0")
from tk_r_em import load_network, load_hrstem_exp_test_data
def fcn_inference():
    """
    Run patch-based inference on a single large experimental HRSTEM image
    and plot the input next to the restored output.
    """
    # Any of [sfr_hrsem, sfr_lrsem, sfr_hrstem, sfr_lrstem, sfr_hrtem, sfr_lrtem]
    net_name = 'sfr_hrstem'
    # Single large experimental image and its matching network.
    x = load_hrstem_exp_test_data('sgl_exp_hrstem')
    r_em_nn = load_network(net_name)
    r_em_nn.summary()
    # Tile into overlapping 256x256 patches (stride 128) so an arbitrarily
    # large field of view fits in memory.
    y_p = r_em_nn.predict_patch_based(x, patch_size=256, stride=128, batch_size=16)
    fig, axs = plt.subplots(1, 2, figsize=(48, 6))
    panels = ((x, f"Experimental {net_name} image"),
              (y_p, f"Restored {net_name} image"))
    for ax, (img, title) in zip(axs, panels):
        ax.imshow(img, cmap='hot')
        ax.set_xticks([])
        ax.set_yticks([])
        ax.grid(False)
        ax.set_title(title, fontsize=14)
    fig.subplots_adjust(hspace=2, wspace=10)
    fig.tight_layout()
    if remote_ssh:
        plt.savefig(f"restored_{net_name}.png", format='png')
    else:
        fig.show()
    print('Done')
if __name__ == '__main__':
fcn_inference() | 1,891 | 27.666667 | 116 | py |
r_em | r_em-master/training/nn_fcns_local.py | #-*- coding: utf-8 -*-
"""
Created on Sun Feb 17 22:30:18 2019
__author__ = "Ivan Lobato"
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import sys
sys.path.append('E:/Neural_network/nt_tf_lib')
sys.path.append('/media/hdd_1/nt_tf_lib')
sys.path.append('/ceph/users/gbz64553/data/Neural_network/nt_tf_lib')
#########################################################################################
import tensorflow as tf
#########################################################################################
import nn_var_glob as nvgb
import nn_fcns_gen as nfge
import nn_fcns_lays as nfly
import nn_fcns_nets as nnet
import nn_fcns_losses as nfls
import nn_fcns_callbacks as nfcb
import nn_fcns_optimizers as nfop
#########################################################################################
# On-disk / in-record dtypes (uint16) and the working TF dtype (float32).
x_typ_mat = [np.uint16]
y_typ_mat = [np.uint16]
x_typ_rtf = [tf.uint16]
y_typ_rtf = [tf.uint16]
x_typ_tf = [tf.float32]
y_typ_tf = [tf.float32]
x_ndim = [4]
y_ndim = [4]
#########################################################################################
# Numerical floors used when dividing by standard deviations, plus a
# gradient-clipping bound (GRAD_MAX is not referenced in this chunk).
EE_STD_X = 0.1
EE_W_Y = 0.1
EE_STD_LCN = 0.1
GRAD_MAX = 100.00
#########################################################################################
# multi-local constrast normalization loss: I found out a proportion between mean(MLN(2+4+8+16))/L1 = 2.62 for EE_STD = 0.1
# Window sizes and (normalized) per-scale weights for the MLWT loss.
MLWT_KSZ = [2, 4, 8, 16]
MLWT_WGTL = np.array([1.0, 1.33, 1.66, 2.0], np.float32)
MLWT_WGTL /= MLWT_WGTL.sum()
#########################################################################################
# Power-compression exponent for FFT magnitudes and its matching prefactor.
FFT_N = 0.1250
FFT_FR = 2.0*np.power(256.0, -FFT_N)
#########################################################################################
# DOWNSCALING factor
DS_FTR = 1
RS_0 = 64
RS_E = 192
V_MIN = 1.0e-9
R_FTR_OPT = 0 # 0: Deactivated, 1:Fix value, 2:Increasing, 3:Random
R_FTR_VAL = 1.0
#########################################################################################
# Loss normalization
LOSS_NORM = True
MATLAB_EXPORT = False
#########################################################################################
GAN_TYP = 0 # 0:deactivated, 1: Active
# Loss slot layout used by LOSS_TYP / LOSS_WGT_TYP below:
# 0: gan - gen
# 1: gan - disc
# 2: L1
# 3: L2
# 4: L1 - MLWT
# 5: L1 - FFT
# 6: mean
# 7: std
#########################################################################################
#################################################### 0, 1, 2, 3 , 4, 5, 6, 7
LOSS_TYP, LOSS_WGT_TYP = nfge.fcn_loss_type_weight([5.0e-4, 1.0000, 10.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000], 1.0)
#########################################################################################
###################################### database #########################################
#########################################################################################
def fcn_g_mean_std(y, axis, std_min):
    """Return the mean and floored std of ``y`` over ``axis`` (keepdims).

    The standard deviation is clipped from below at ``std_min`` so the
    normalization helpers never divide by a tiny value.
    """
    mean, var = tf.nn.moments(y, axes=axis, keepdims=True)
    std = tf.math.maximum(tf.sqrt(var), std_min)
    return mean, std
def fcn_wt_x(x, axis, std_min):
    """Whitening transform of ``x`` using its own mean/std over ``axis``."""
    shift, scale = fcn_g_mean_std(x, axis, std_min)
    return (x - shift) / scale
def fcn_wt_xy(x, y, axis, std_min):
    """Whiten ``x`` and ``y`` with statistics computed from ``x`` only."""
    shift, scale = fcn_g_mean_std(x, axis, std_min)
    return (x - shift) / scale, (y - shift) / scale
#########################################################################################
def fcn_scale_inputs_x_y(x, y, axis):
    """Standardize the (input, target) pair with statistics of ``x`` only."""
    shift, scale = fcn_g_mean_std(x, axis, EE_STD_X)
    return (x - shift) / scale, (y - shift) / scale
#########################################################################################
@tf.function
def fcn_ds_map(record_string, bb_norm, bb_aug):
    # Dataset map fn: parse one serialized example into (x, y) float tensors.
    # NOTE(review): bb_aug is accepted but never used here — confirm whether
    # augmentation was meant to be applied in this map.
    data = nfge.fcn_ds_parse_sgl_ex_string_string(record_string)
    # Decode the raw uint16 byte strings to float32 and restore image shapes.
    x = nfge.fcn_string_dc(data['x'], x_typ_rtf[0], x_typ_tf[0])
    x = nfge.fcn_x_reshape(x)
    y = nfge.fcn_string_dc(data['y'], y_typ_rtf[0], y_typ_tf[0])
    y = nfge.fcn_y_reshape(y)
    if bb_norm:
        # Standardize both tensors with the input statistics over axes [0, 1].
        x, y = fcn_scale_inputs_x_y(x, y, [0, 1])
    return x, y
#########################################################################################
# fcn_x blends the degraded input toward the clean target y by a factor r,
# chosen at import time via R_FTR_OPT (0: pass-through, 1: fixed r,
# 2: r ramps with training progress ibg/ibg_m, 3: r drawn uniformly).
if R_FTR_OPT==0:
    def fcn_x(x, y, ibg, ibg_m):
        # Deactivated: use the input unchanged.
        return x
elif R_FTR_OPT==1:
    def fcn_x(x, y, ibg, ibg_m):
        # Fixed blending factor.
        r = R_FTR_VAL
        x_op = tf.expand_dims(x, axis=-1)
        x_op = y + r*(x_op - y)
        return x_op
elif R_FTR_OPT==2:
    def fcn_x(x, y, ibg, ibg_m):
        # r ramps linearly from 0 to 1 over the first ibg_m batches.
        r = tf.minimum(1.0, tf.maximum(0.0, ibg/ibg_m))
        x_op = tf.expand_dims(x, axis=-1)
        x_op = y + r*(x_op - y)
        return x_op
else:
    def fcn_x(x, y, ibg, ibg_m):
        # Random blending factor per call.
        r = tf.random.uniform((), dtype=tf.float32)
        # NOTE(review): this branch slices channel 0 (x[..., 0]) before
        # re-expanding, unlike branches 1/2 which expand the full x —
        # confirm the asymmetry is intentional.
        x_op = tf.expand_dims(x[..., 0], axis=-1)
        x_op = y + r*(x_op - y)
        return x_op
def fcn_gn(x, sigma, ftr_min, ftr_max):
    """Add Gaussian noise to ``x`` with std drawn from [ftr_min, ftr_max]*sigma.

    Fixed seeds keep the noise draws reproducible across runs.
    """
    scale = tf.random.uniform(tf.shape(sigma), minval=ftr_min, maxval=ftr_max, dtype=x_typ_tf[0], seed=2000)
    noise = tf.random.normal(tf.shape(x), 0.0, scale*sigma, dtype=x_typ_tf[0])
    return x + noise
def fcn_input_disc(x, y, y_p):
    # Build discriminator inputs: (input‖target, input‖prediction) pairs.
    # With 50% probability, add matched Gaussian noise to both the target and
    # the prediction so the discriminator cannot key on noise statistics alone.
    bb_noise = tf.less(tf.random.uniform((), dtype=tf.float32, seed=2001), 0.50)
    if bb_noise:
        # NOTE(review): a Python `if` on a tensor only works in eager mode;
        # inside a tf.function this would need tf.cond — confirm call context.
        sigma = tf.maximum(0.01, tf.math.reduce_std(y, axis=[1, 2], keepdims=True))
        y = fcn_gn(y, sigma, 0.001, 0.10)
        y_p = fcn_gn(y_p, sigma, 0.001, 0.10)
    return tf.concat([x, y], axis=-1), tf.concat([x, y_p], axis=-1)
# Power compression of FFT magnitudes, specialized at import time on FFT_N.
if nvgb.isone(FFT_N):
    def fcn_pow_fft_n(y):
        # FFT_N == 1: identity, no compression.
        return y
elif nvgb.iszero(FFT_N-0.5):
    def fcn_pow_fft_n(y):
        # FFT_N == 0.5: plain square root.
        return tf.sqrt(y)
else:
    def fcn_pow_fft_n(y):
        # General case: y**FFT_N with a V_MIN floor for numerical stability.
        return tf.pow(tf.math.maximum(V_MIN, y), FFT_N)
def fcn_sft_std(x, kn_sz):
    """Windowed mean and std of ``x`` via average pooling.

    These are the local-contrast-normalization statistics: a ``kn_sz``
    moving average and the matching local standard deviation, floored at
    EE_STD_LCN.
    """
    local_mean = tf.nn.avg_pool2d(x, kn_sz, strides=(1, 1), padding='SAME')
    local_var = tf.nn.avg_pool2d(tf.math.squared_difference(x, local_mean), kn_sz, strides=(1, 1), padding='SAME')
    local_std = tf.math.maximum(tf.sqrt(local_var), EE_STD_LCN)
    return local_mean, local_std
def fcn_mlwt(y_t, y_p, kn_sz):
    """Local-contrast normalize the pair with ``y_t`` statistics, then whiten."""
    shift, scale = fcn_sft_std(y_t, kn_sz)
    y_t_n = (y_t - shift) / scale
    y_p_n = (y_p - shift) / scale
    # Final global whitening over the spatial axes.
    return fcn_wt_xy(y_t_n, y_p_n, [1, 2], EE_W_Y)
if LOSS_NORM:
    def fcn_g_weight(y):
        """Per-sample inverse-std weights (std floored at EE_W_Y)."""
        std = tf.math.maximum(tf.math.reduce_std(y, axis=[1, 2], keepdims=True), EE_W_Y)
        return 1.0 / std

    def fcn_g_apply_weight(y_t, y_p):
        """Scale both tensors by the inverse std of the target ``y_t``."""
        w = fcn_g_weight(y_t)
        return w * y_t, w * y_p
else:
    def fcn_g_apply_weight(y_t, y_p):
        """Loss normalization disabled: pass the pair through unchanged."""
        return y_t, y_p
#########################################################################################
def fmet_l1(y_true, y_pred):
    # Reported L1 metric: plain MAE, computed on the raw (unweighted) pair.
    return nfls.fls_mae(y_true, y_pred)
#########################################################################################
def fls_l1(y_true, y_pred):
    # L1 loss term: same MAE as the metric (fls_pw_w feeds it the weighted pair).
    return nfls.fls_mae(y_true, y_pred)
#########################################################################################
def fls_l2(y_true, y_pred):
    """Mean squared error (real part only, so complex inputs are safe)."""
    sq_diff = tf.math.squared_difference(y_true, y_pred)
    return tf.reduce_mean(tf.math.real(sq_diff))
#########################################################################################
def fls_l1_mlwt(y_true, y_pred):
    """L1 loss after multi-scale local whitening transforms.

    The pair is globally whitened first, then compared at every window
    size in MLWT_KSZ with the matching weight from MLWT_WGTL.
    """
    y_t, y_p = fcn_wt_xy(y_true, y_pred, [1, 2], EE_W_Y)
    loss = 0.0
    for ksz, wgt in zip(MLWT_KSZ, MLWT_WGTL):
        y_t_k, y_p_k = fcn_mlwt(y_t, y_p, (ksz, ksz))
        loss += wgt * nfls.fls_mae(y_t_k, y_p_k)
    return loss
#########################################################################################
def fls_l1_fft(y_true, y_pred):
    """Mean power-compressed FFT magnitude of the last-channel residual.

    The FFT_FR prefactor is applied before the transform to avoid
    numerical overflow (as noted in the original implementation).
    """
    residual = FFT_FR*(y_true[..., -1] - y_pred[..., -1])
    spectrum = tf.abs(tf.signal.rfft2d(residual))
    return tf.reduce_mean(fcn_pow_fft_n(spectrum))
#########################################################################################
def fls_l1_mean(y_true, y_pred):
    """L1 distance between the per-sample spatial means."""
    diff = tf.reduce_mean(y_true, axis=[1, 2]) - tf.reduce_mean(y_pred, axis=[1, 2])
    return tf.reduce_mean(tf.abs(diff))
#########################################################################################
def fls_l1_std(y_true, y_pred):
    """L1 distance between the per-sample spatial standard deviations."""
    diff = tf.math.reduce_std(y_true, axis=[1, 2]) - tf.math.reduce_std(y_pred, axis=[1, 2])
    return tf.reduce_mean(tf.abs(diff))
#########################################################################################
def _fcn_make_loss_w(idx, base_loss):
    """Build the weighted wrapper for loss slot ``idx``.

    When LOSS_TYP[idx] is enabled, the wrapper returns
    ``(LOSS_WGT_TYP[idx] * loss, loss)``; otherwise it returns a pair of
    float32 zeros so downstream sums are unaffected.
    """
    if LOSS_TYP[idx]:
        def _loss_w(y_true, y_pred):
            loss = base_loss(y_true, y_pred)
            return LOSS_WGT_TYP[idx]*loss, loss
    else:
        def _loss_w(y_true, y_pred):
            return tf.constant(0, tf.float32), tf.constant(0, tf.float32)
    return _loss_w

# Slot indices follow the loss layout documented above (2..7).
fls_l1_w = _fcn_make_loss_w(2, fls_l1)
fls_l2_w = _fcn_make_loss_w(3, fls_l2)
fls_l1_mlwt_w = _fcn_make_loss_w(4, fls_l1_mlwt)
fls_l1_fft_w = _fcn_make_loss_w(5, fls_l1_fft)
fls_l1_mean_w = _fcn_make_loss_w(6, fls_l1_mean)
fls_l1_std_w = _fcn_make_loss_w(7, fls_l1_std)
#########################################################################################
def fls_pw_w(y_t_i, y_p_i):
    """Aggregate all enabled pixelwise losses into a dictionary.

    The losses are evaluated on the (optionally) loss-normalized pair,
    while the reported L1 metric is computed on the raw inputs.
    """
    y_t, y_p = fcn_g_apply_weight(y_t_i, y_p_i)
    l1_w, l1 = fls_l1_w(y_t, y_p)
    l2_w, l2 = fls_l2_w(y_t, y_p)
    mlwt_w, mlwt = fls_l1_mlwt_w(y_t, y_p)
    fft_w, fft = fls_l1_fft_w(y_t, y_p)
    mean_w, mean = fls_l1_mean_w(y_t, y_p)
    std_w, std = fls_l1_std_w(y_t, y_p)
    total_w = l1_w + l2_w + mlwt_w + fft_w + mean_w + std_w
    return {
        'loss_pw_w': total_w,
        'loss_l1_w': l1_w, 'loss_l1': l1,
        'loss_l2_w': l2_w, 'loss_l2': l2,
        'loss_l1_mlwt_w': mlwt_w, 'loss_l1_mlwt': mlwt,
        'loss_l1_fft_w': fft_w, 'loss_l1_fft': fft,
        'loss_l1_mean_w': mean_w, 'loss_l1_mean': mean,
        'loss_l1_std_w': std_w, 'loss_l1_std': std,
        'met_l1': fmet_l1(y_t_i, y_p_i),
    }
#########################################################################################
def fls_adv(y_d_real, y_d_gen):
    """Relativistic adversarial loss for the generator (from logits)."""
    rel_real = y_d_real - tf.reduce_mean(y_d_gen)
    rel_gen = y_d_gen - tf.reduce_mean(y_d_real)
    bce = tf.keras.losses.BinaryCrossentropy(from_logits=True)
    # Generator wants relativistic real scores low and generated scores high.
    return bce(tf.zeros_like(rel_real), rel_real) + bce(tf.ones_like(rel_gen), rel_gen)

def fls_adv_w(y_d_real, y_d_gen):
    """Weighted generator adversarial loss: (weighted, raw) pair."""
    loss = fls_adv(y_d_real, y_d_gen)
    return LOSS_WGT_TYP[0]*loss, loss
#########################################################################################
def fls_disc(y_d_real, y_d_gen):
    """Relativistic discriminator loss with one-sided label smoothing (0.95)."""
    rel_real = y_d_real - tf.reduce_mean(y_d_gen)
    rel_gen = y_d_gen - tf.reduce_mean(y_d_real)
    bce = tf.keras.losses.BinaryCrossentropy(from_logits=True)
    smoothed_ones = tf.ones_like(rel_real) - 0.05
    return bce(smoothed_ones, rel_real) + bce(tf.zeros_like(rel_gen), rel_gen)

def fls_disc_w(y_d_real, y_d_gen):
    """Weighted discriminator loss: (weighted, raw) pair."""
    loss = fls_disc(y_d_real, y_d_gen)
    return LOSS_WGT_TYP[1]*loss, loss
#########################################################################################
##################################### generator #########################################
#########################################################################################
# CGRDN global residual connection between input and output
def fcn_net_gen_v1(x, ftrs_i, ftrs_o, ftrs_g, ftrs_btn, kn_sz_dus, stri_dus, kn_sz, act_str, n_lays, n_rdb, n_grdb, name, parm_init=None, parm_reg=None, parm_cstr=None):
    """Generator graph: CGRDN with a global residual from input to output.

    Structure: first conv -> strided downsample -> n_grdb GRDN groups whose
    outputs are concatenated and fused -> transposed-conv upsample -> last
    conv -> add the original input (global residual).
    """
    dilt_rt = 1
    # Dropout/normalization settings come from module-level globals.
    parm_dp = nvgb.fcn_read_dropout_parm_dict()
    parm_norm = nvgb.fcn_read_normalization_parm_dict()
    dsn_typ = nvgb.DENSENET_TYPE
    use_bi = nfly.fcn_use_bias_norm_parm(parm_norm)
    # First/down/up/last stages run without an activation.
    act_str_fst = None
    act_str_down = None
    act_str_up = None
    act_str_last = None
    dpr_tra_0, dpr_tra_dn, dpr_tra_e, dpr_dn_1, dpr_dn_2 = nfly.fcn_set_dn_dropout(parm_dp['dp_opt'], parm_dp['dp_rt'])[0:5]
    # global residual
    x_i = x
    # first
    x = nfly.fcn_conv_2d(x, ftrs_i, kn_sz, (1, 1), act_str_fst, dilt_rt, True, 'same', dpr_tra_0, name + 'fst_0', parm_norm, parm_init, parm_reg, parm_cstr)
    # downsampling
    x = nfly.fcn_conv_2d_gen(x, ftrs_i, kn_sz_dus, stri_dus, act_str_down, dilt_rt, use_bi, 'same', dpr_tra_0, name + 'down_spl_0', parm_norm, parm_init, parm_reg, parm_cstr)
    # middle: stack of grouped residual dense networks with a skip connection.
    x_skc = x
    x_cc = []
    for ik in range(n_grdb):
        name_grdb = name + 'g_' + str(ik+1)
        x = nnet.fcn_grdn(x, ftrs_g, ftrs_btn, kn_sz, act_str, n_lays, n_rdb, dilt_rt, (dpr_dn_1, dpr_dn_2), dsn_typ, name_grdb, parm_norm, parm_init, parm_reg, parm_cstr)
        x_cc.append(x)
    # Fuse all group outputs with a 1x1 conv and add the pre-group skip.
    x_cc = tf.keras.layers.Concatenate(axis=3, name=name + 'g_concat')(x_cc)
    x = nfly.fcn_conv_2d(x_cc, ftrs_i, (1, 1), (1, 1), nvgb.DENSENET_FSL_ACT, 1, True, 'same', 0.0, name + 'g_fsl', parm_norm, parm_init, parm_reg, parm_cstr)
    x = tf.keras.layers.Add(name=name + 'g_add')([x, x_skc])
    # upsampling
    x = nfly.fcn_dconv_2d_gen(x, ftrs_i, kn_sz_dus, stri_dus, act_str_up, dilt_rt, use_bi, 'same', dpr_tra_e, name + 'up_spl_0', parm_norm, parm_init, parm_reg, parm_cstr)
    # last (regularization/constraints deliberately disabled on this layer)
    x = nfly.fcn_conv_2d(x, ftrs_o, kn_sz, (1, 1), act_str_last, dilt_rt, True, 'same', 0.0, name + 'last_0', parm_norm, parm_init, parm_reg={'reg_wtyp': 0, 'reg_atyp': 0}, parm_cstr={'cstr_typ': 0})
    # global residual
    x = tf.keras.layers.Add(name=name + 'add_0')([x, x_i])
    return x
def nn_model_gen(input_shape, input_name, prefix_layer, model_name):
    """Build the Keras generator model; depth/width chosen from nvgb.MODV.

    NOTE(review): modv == 0 (or any value outside 1..9) leaves n_lays etc.
    undefined and raises NameError below — confirm MODV is always set so
    that MODV % 10 is in 1..9. Branches 3 and 5 are identical; presumably
    kept for versioning — verify.
    """
    modv = nvgb.MODV % 10
    if modv==1:
        n_lays = 4
        n_rdb = 4
        n_grdb = 4
        ftrs_g = 16
    elif modv==2:
        n_lays = 4
        n_rdb = 4
        n_grdb = 4
        ftrs_g = 24
    elif modv==3:
        n_lays = 4
        n_rdb = 4
        n_grdb = 4
        ftrs_g = 32
    elif modv==4:
        n_lays = 9
        n_rdb = 4
        n_grdb = 4
        ftrs_g = 32
    elif modv==5:
        n_lays = 4
        n_rdb = 4
        n_grdb = 4
        ftrs_g = 32
    elif modv==6:
        n_lays = 5
        n_rdb = 4
        n_grdb = 4
        ftrs_g = 32
    elif modv==7:
        n_lays = 6
        n_rdb = 4
        n_grdb = 4
        ftrs_g = 32
    elif modv==8:
        n_lays = 7
        n_rdb = 4
        n_grdb = 4
        ftrs_g = 32
    elif modv==9:
        n_lays = 8
        n_rdb = 4
        n_grdb = 4
        ftrs_g = 32
    # Shared hyper-parameters for every variant.
    ftrs_i = 64
    ftrs_btn = nvgb.DENSENET_BOTTLENECK_FR*ftrs_g
    ftrs_o = nvgb.Y_SHAPE[2]
    # 4x4/stride-2 kernels for the down/upsampling stages, 3x3 elsewhere.
    kn_sz_dus = (4, 4)
    stri_dus = (2, 2)
    kn_sz = (3, 3)
    act_str = nvgb.ACTIVATION_STR
    x_i = tf.keras.layers.Input(shape=input_shape, name=input_name, dtype='float32')
    x = fcn_net_gen_v1(x_i, ftrs_i, ftrs_o, ftrs_g, ftrs_btn, kn_sz_dus, stri_dus, kn_sz, act_str, n_lays, n_rdb, n_grdb, prefix_layer)
    return tf.keras.models.Model(inputs=x_i, outputs=x, name=model_name)
################################### discriminator #######################################
def fcn_net_disc_v1(x, ftrs_o, act_str, name, parm_norm=None, parm_init=None, parm_reg=None, parm_cstr=None):
    """PatchGAN-style discriminator: three strided downsamples, a middle
    conv, and a final 1-channel patch-score conv.

    NOTE(review): the parm_norm/parm_init/parm_reg/parm_cstr arguments are
    accepted but immediately overwritten below, so caller-supplied values
    are ignored. Signature kept unchanged for compatibility.
    """
    parm_norm = {'norm_typ': 1, 'norm_pos': 2, 'norm_m': 0.95, 'norm_eps': 1e-3}
    parm_init = {'init_typ': 7, 'init_sfr': 0.02}
    # BUG flagged from the original: the dict literal repeated the key
    # 'reg_wkn' ({'reg_wkn': 2e-6, 'reg_wkn': 2e-5}), so Python silently
    # kept only the last value. The duplicate is removed here; the
    # effective value (2e-5) is unchanged. The first entry may have been
    # meant for a different key — confirm with the author.
    parm_reg = {'reg_wtyp': 0, 'reg_wkn': 2e-5}
    parm_cstr = None
    ftrs_i = 64
    kn_sz = (4, 4)
    dp_rt = 0.0
    dilt_rt = 1
    x = nfly.fcn_conv_2d(x, ftrs_i, kn_sz, (2, 2), act_str, dilt_rt, False, 'same', 0.0, name + 'downspl_1', parm_norm, parm_init, parm_reg, parm_cstr) # (bs, 128, 128, 1*ftrs_i)
    x = nfly.fcn_conv_2d_bna(x, 2*ftrs_i, kn_sz, (2, 2), act_str, dilt_rt, False, 'same', dp_rt, name + 'downspl_2', parm_norm, parm_init, parm_reg, parm_cstr) # (bs, 64, 64, 2*ftrs_i)
    x = nfly.fcn_conv_2d_bna(x, 4*ftrs_i, kn_sz, (2, 2), act_str, dilt_rt, False, 'same', dp_rt, name + 'downspl_3', parm_norm, parm_init, parm_reg, parm_cstr) # (bs, 32, 32, 4*ftrs_i)
    x = nfly.fcn_conv_2d_bna(x, 8*ftrs_i, kn_sz, (1, 1), act_str, dilt_rt, False, 'same', dp_rt, name + 'middle', parm_norm, parm_init, parm_reg, parm_cstr) # (bs, 32, 32, 8*ftrs_i)
    x = nfly.fcn_conv_2d(x, ftrs_o, kn_sz, (1, 1), None, dilt_rt, True, 'same', 0.0, name + 'last', parm_norm, parm_init, parm_reg={'reg_wtyp': 0, 'reg_atyp': 0}, parm_cstr={'cstr_typ': 0}) # (bs, 32, 32, 1)
    return x
def nn_model_disc(input_shape, input_name, prefix_layer, model_name):
    """Build the Keras discriminator model around fcn_net_disc_v1."""
    # One output channel of patch scores, leaky-ReLU activations throughout.
    ftrs_o = 1
    act_str = 'leaky_relu'
    inputs = tf.keras.layers.Input(shape=input_shape, name=input_name, dtype='float32')
    outputs = fcn_net_disc_v1(inputs, ftrs_o, act_str, prefix_layer)
    return tf.keras.models.Model(inputs=inputs, outputs=outputs, name=model_name)
#########################################################################################
######################################## model ##########################################
#########################################################################################
if GAN_TYP == 1:
class My_model(tf.keras.Model):
def __init__(self, input_shape, *args, **kwargs):
super(My_model, self).__init__(*args, **kwargs)
self.gen = nn_model_gen(input_shape, 'input_gen', 'gen_', 'nEM_model_rest')
self.disc = nn_model_disc((input_shape[0], input_shape[1], 2), 'input_disc', 'disc_', 'nEM_model_disc')
self.met_l1 = tf.keras.metrics.Mean(name="met_l1")
self.val_met_l1= tf.keras.metrics.Mean(name="met_l1")
self.loss_pw_w = tf.keras.metrics.Mean(name="loss_pw_w")
self.val_loss_pw_w = tf.keras.metrics.Mean(name="loss_pw_w")
self.loss_gen_w = tf.keras.metrics.Mean(name="loss_gen_w")
self.val_loss_gen_w = tf.keras.metrics.Mean(name="loss_gen_w")
self.loss_gen_reg = tf.keras.metrics.Mean(name="loss_gen_reg")
self.val_loss_gen_reg = tf.keras.metrics.Mean(name="loss_gen_reg")
self.loss_disc_adv_reg = tf.keras.metrics.Mean(name="loss_disc_adv_reg")
self.val_loss_disc_adv_reg = tf.keras.metrics.Mean(name="loss_disc_adv_reg")
if LOSS_TYP[0]:
self.loss_gen_adv_w = tf.keras.metrics.Mean(name="loss_gen_adv_w")
self.loss_gen_adv = tf.keras.metrics.Mean(name="loss_gen_adv")
self.val_loss_gen_adv_w = tf.keras.metrics.Mean(name="loss_gen_adv_w")
self.val_loss_gen_adv = tf.keras.metrics.Mean(name="val_loss_gen_adv")
if LOSS_TYP[1]:
self.loss_disc_adv_w = tf.keras.metrics.Mean(name="loss_disc_adv_w")
self.loss_disc_adv = tf.keras.metrics.Mean(name="loss_disc_adv")
self.val_loss_disc_adv_w = tf.keras.metrics.Mean(name="loss_disc_adv_w")
self.val_loss_disc_adv = tf.keras.metrics.Mean(name="loss_disc_adv")
if LOSS_TYP[2]:
self.loss_l1_w = tf.keras.metrics.Mean(name="loss_l1_w")
self.loss_l1 = tf.keras.metrics.Mean(name="loss_l1")
self.val_loss_l1_w = tf.keras.metrics.Mean(name="loss_l1_w")
self.val_loss_l1 = tf.keras.metrics.Mean(name="loss_l1")
if LOSS_TYP[3]:
self.loss_l2_w = tf.keras.metrics.Mean(name="loss_l2_w")
self.loss_l2 = tf.keras.metrics.Mean(name="loss_l2")
self.val_loss_l2_w = tf.keras.metrics.Mean(name="loss_l2_w")
self.val_loss_l2 = tf.keras.metrics.Mean(name="loss_l2")
if LOSS_TYP[4]:
self.loss_l1_mlwt_w = tf.keras.metrics.Mean(name="loss_l1_mlwt_w")
self.loss_l1_mlwt = tf.keras.metrics.Mean(name="loss_l1_mlwt")
self.val_loss_l1_mlwt_w = tf.keras.metrics.Mean(name="loss_l1_mlwt_w")
self.val_loss_l1_mlwt = tf.keras.metrics.Mean(name="loss_l1_mlwt")
if LOSS_TYP[5]:
self.loss_l1_fft_w = tf.keras.metrics.Mean(name="loss_l1_fft_w")
self.loss_l1_fft = tf.keras.metrics.Mean(name="loss_l1_fft")
self.val_loss_l1_fft_w = tf.keras.metrics.Mean(name="loss_l1_fft_w")
self.val_loss_l1_fft = tf.keras.metrics.Mean(name="loss_l1_fft")
if LOSS_TYP[6]:
self.loss_l1_mean_w = tf.keras.metrics.Mean(name="loss_l1_mean_w")
self.loss_l1_mean = tf.keras.metrics.Mean(name="loss_l1_mean")
self.val_loss_l1_mean_w = tf.keras.metrics.Mean(name="loss_l1_mean_w")
self.val_loss_l1_mean = tf.keras.metrics.Mean(name="loss_l1_mean")
if LOSS_TYP[7]:
self.loss_l1_std_w = tf.keras.metrics.Mean(name="loss_l1_std_w")
self.loss_l1_std = tf.keras.metrics.Mean(name="loss_l1_std")
self.val_loss_l1_std_w = tf.keras.metrics.Mean(name="loss_l1_std_w")
self.val_loss_l1_std = tf.keras.metrics.Mean(name="loss_l1_std")
self.ibg = tf.Variable(0.0, dtype=tf.float32, trainable=False)
self.ibg_m = 2*nvgb.TRAIN_N_DATA//nvgb.BATCH_SIZE
def compile(self, parm):
super(My_model, self).compile()
self.gen_opt = nfop.fcn_get_optimizer_from_vgb()
self.lr_schedule_gen = nfcb.Cb_Lr_schedule_base(
lr_min=parm['lr_min'],
lr_max=parm['opt_lr'],
m_min=parm['m_0'],
m_max=parm['opt_m'],
decay_steps=parm['decay_steps'],
decay_rate=parm['decay_rate'],
steps_per_cycle=parm['decay_steps'],
warmup_steps=parm['warmup_steps'],
cooldown_steps=parm['cooldown_steps'],
lr_0=parm['lr_0'],
warmup_steps_0=parm['warmup_steps_0'],
cooldown_steps_0=parm['cooldown_steps_0'],
decay_steps_0=parm['decay_steps_0'],
lrs_m_pow=parm['lrs_m_pow'],
lrs_lr_pow=parm['lrs_lr_pow'])
#########################################################################################
opt_typ, opt_lr, opt_m = 3, nvgb.OPTIMIZER_LR, 0.5
self.disc_opt = nfop.fcn_get_optimizer(opt_typ=opt_typ, opt_lr=opt_lr,
opt_m=opt_m, opt_nesterov=nvgb.OPTIMIZER_NESTEROV,
opt_beta_2=nvgb.OPTIMIZER_BETA_2, opt_eps=nvgb.OPTIMIZER_EPSILON,
opt_ctyp=0, opt_cval=nvgb.OPTIMIZER_CLIP_VALUE)
self.lr_schedule_disc = nfcb.Cb_Lr_schedule_base(
lr_min=1e-4*opt_lr,
lr_max=opt_lr,
m_min=0.0,
m_max=opt_m,
decay_steps=parm['decay_steps'],
decay_rate=parm['decay_rate'],
steps_per_cycle=parm['decay_steps'],
warmup_steps=parm['warmup_steps'],
cooldown_steps=parm['cooldown_steps'],
lr_0=1e-4*opt_lr,
warmup_steps_0=parm['warmup_steps_0'],
cooldown_steps_0=parm['cooldown_steps_0'],
decay_steps_0=parm['decay_steps_0'],
lrs_m_pow=parm['lrs_m_pow'],
lrs_lr_pow=parm['lrs_lr_pow'])
def reset_opt_iter(self):
    """Zero the iteration counters of both optimizers (used when restarting a schedule)."""
    for opt in (self.gen_opt, self.disc_opt):
        tf.keras.backend.set_value(opt.iterations, 0)
def set_opt_lr_m(self):
    """Push the currently scheduled learning rate / momentum into each optimizer."""
    for schedule, opt in ((self.lr_schedule_gen, self.gen_opt),
                          (self.lr_schedule_disc, self.disc_opt)):
        schedule.get_set_opt_lr_m(opt)
def inc_opt_counter(self):
    """Advance both learning-rate schedules by one step."""
    for schedule in (self.lr_schedule_gen, self.lr_schedule_disc):
        schedule.inc_counter()
def call(self, inputs, training=None, mask=None):
    """Forward pass: delegate to the generator sub-model (``mask`` is ignored)."""
    # Pass training as a keyword so it cannot be mis-bound positionally if
    # the generator's call signature ever changes.
    return self.gen(inputs, training=training)
@tf.function
def train_step(self, data):
    """One adversarial optimization step.

    Updates generator and discriminator from the same forward pass using
    two independent gradient tapes, then accumulates every enabled loss
    tracker (selected by the module-level LOSS_TYP flags) into the
    returned metrics dict.
    """
    x, y = data
    # fcn_x may alter the input as a function of the global batch counter ibg.
    x = fcn_x(x, y, self.ibg, self.ibg_m)
    with tf.GradientTape() as tape_gen, tf.GradientTape() as tape_disc:
        # Forward pass
        y_p = self.gen(x, training=True)
        # pixelwise loss
        loss_gen_pw_dict = fls_pw_w(y, y_p)
        x_d, x_d_p = fcn_input_disc(x, y, y_p)
        y_d = self.disc(x_d, training=True)
        y_d_p = self.disc(x_d_p, training=True)
        # gan-gen loss
        loss_gen_adv_w, loss_gen_adv = fls_adv_w(y_d, y_d_p)
        # gen loss
        loss_gen_w = loss_gen_adv_w + loss_gen_pw_dict['loss_pw_w']
        # gan-disc loss
        loss_disc_adv_w, loss_disc_adv = fls_disc_w(y_d, y_d_p)
        # regularization loss (weight decay terms registered on each sub-model)
        loss_gen_reg = tf.reduce_sum(self.gen.losses)
        loss_disc_adv_reg = tf.reduce_sum(self.disc.losses)
        loss_gen_t = loss_gen_w + loss_gen_reg
        loss_disc_adv_t = loss_disc_adv_w + loss_disc_adv_reg
    # Compute gradient
    grad_gen_t = tape_gen.gradient(loss_gen_t, self.gen.trainable_variables)
    grad_disc_t = tape_disc.gradient(loss_disc_adv_t, self.disc.trainable_variables)
    #clip gradients
    grad_gen_t = nfop.fcn_optimizer_clip_gradients(self.gen_opt, grad_gen_t)
    grad_disc_t = nfop.fcn_optimizer_clip_gradients(self.disc_opt, grad_disc_t)
    # Update gradient
    self.gen_opt.apply_gradients(zip(grad_gen_t, self.gen.trainable_variables))
    self.disc_opt.apply_gradients(zip(grad_disc_t, self.disc.trainable_variables))
    # save metrics
    self.met_l1.update_state(loss_gen_pw_dict['met_l1'])
    metrics_out = {'met_l1': self.met_l1.result()}
    # save losses
    self.loss_pw_w.update_state(loss_gen_pw_dict['loss_pw_w'])
    metrics_out.update({'loss_pw_w': self.loss_pw_w.result()})
    self.loss_gen_w.update_state(loss_gen_w)
    metrics_out.update({'loss_gen_w': self.loss_gen_w.result()})
    self.loss_gen_reg.update_state(loss_gen_reg)
    metrics_out.update({'loss_gen_reg': self.loss_gen_reg.result()})
    self.loss_disc_adv_reg.update_state(loss_disc_adv_reg)
    metrics_out.update({'loss_disc_adv_reg': self.loss_disc_adv_reg.result()})
    # Optional trackers: only the losses enabled by LOSS_TYP exist as attributes.
    if LOSS_TYP[0]:
        self.loss_gen_adv_w.update_state(loss_gen_adv_w)
        self.loss_gen_adv.update_state(loss_gen_adv)
        metrics_out.update({'loss_gen_adv_w': self.loss_gen_adv_w.result(), 'loss_gen_adv': self.loss_gen_adv.result()})
    if LOSS_TYP[1]:
        self.loss_disc_adv_w.update_state(loss_disc_adv_w)
        self.loss_disc_adv.update_state(loss_disc_adv)
        metrics_out.update({'loss_disc_adv_w': self.loss_disc_adv_w.result(), 'loss_disc_adv': self.loss_disc_adv.result()})
    if LOSS_TYP[2]:
        self.loss_l1_w.update_state(loss_gen_pw_dict['loss_l1_w'])
        self.loss_l1.update_state(loss_gen_pw_dict['loss_l1'])
        metrics_out.update({'loss_l1_w': self.loss_l1_w.result(), 'loss_l1': self.loss_l1.result()})
    if LOSS_TYP[3]:
        self.loss_l2_w.update_state(loss_gen_pw_dict['loss_l2_w'])
        self.loss_l2.update_state(loss_gen_pw_dict['loss_l2'])
        metrics_out.update({'loss_l2_w': self.loss_l2_w.result(), 'loss_l2': self.loss_l2.result()})
    if LOSS_TYP[4]:
        self.loss_l1_mlwt_w.update_state(loss_gen_pw_dict['loss_l1_mlwt_w'])
        self.loss_l1_mlwt.update_state(loss_gen_pw_dict['loss_l1_mlwt'])
        metrics_out.update({'loss_l1_mlwt_w': self.loss_l1_mlwt_w.result(), 'loss_l1_mlwt': self.loss_l1_mlwt.result()})
    if LOSS_TYP[5]:
        self.loss_l1_fft_w.update_state(loss_gen_pw_dict['loss_l1_fft_w'])
        self.loss_l1_fft.update_state(loss_gen_pw_dict['loss_l1_fft'])
        metrics_out.update({'loss_l1_fft_w': self.loss_l1_fft_w.result(), 'loss_l1_fft': self.loss_l1_fft.result()})
    if LOSS_TYP[6]:
        self.loss_l1_mean_w.update_state(loss_gen_pw_dict['loss_l1_mean_w'])
        self.loss_l1_mean.update_state(loss_gen_pw_dict['loss_l1_mean'])
        metrics_out.update({'loss_l1_mean_w': self.loss_l1_mean_w.result(), 'loss_l1_mean': self.loss_l1_mean.result()})
    if LOSS_TYP[7]:
        self.loss_l1_std_w.update_state(loss_gen_pw_dict['loss_l1_std_w'])
        self.loss_l1_std.update_state(loss_gen_pw_dict['loss_l1_std'])
        metrics_out.update({'loss_l1_std_w': self.loss_l1_std_w.result(), 'loss_l1_std': self.loss_l1_std.result()})
    # advance the batch counter consumed by fcn_x
    self.ibg.assign_add(1.0)
    return metrics_out
@tf.function
def test_step(self, data):
    """Validation step: same losses as train_step but no gradient updates
    and no input scheduling; results go into the `val_` trackers."""
    x, y = data
    # Forward pass
    y_p = self.gen(x, training=False)
    # pixelwise loss
    loss_gen_pw_dict = fls_pw_w(y, y_p)
    x_d, x_d_p = fcn_input_disc(x, y, y_p)
    y_d = self.disc(x_d, training=False)
    y_d_p = self.disc(x_d_p, training=False)
    # gan-gen loss
    loss_gen_adv_w, loss_gen_adv = fls_adv_w(y_d, y_d_p)
    # gen loss
    loss_gen_w = loss_gen_adv_w + loss_gen_pw_dict['loss_pw_w']
    # gan-disc loss
    loss_disc_adv_w, loss_disc_adv = fls_disc_w(y_d, y_d_p)
    # regularization loss
    loss_gen_reg = tf.reduce_sum(self.gen.losses)
    loss_disc_adv_reg = tf.reduce_sum(self.disc.losses)
    # save metrics
    self.val_met_l1.update_state(loss_gen_pw_dict['met_l1'])
    metrics_out = {'met_l1': self.val_met_l1.result()}
    # save losses
    self.val_loss_pw_w.update_state(loss_gen_pw_dict['loss_pw_w'])
    metrics_out.update({'loss_pw_w': self.val_loss_pw_w.result()})
    self.val_loss_gen_w.update_state(loss_gen_w)
    metrics_out.update({'loss_gen_w': self.val_loss_gen_w.result()})
    self.val_loss_gen_reg.update_state(loss_gen_reg)
    metrics_out.update({'loss_gen_reg': self.val_loss_gen_reg.result()})
    self.val_loss_disc_adv_reg.update_state(loss_disc_adv_reg)
    metrics_out.update({'loss_disc_adv_reg': self.val_loss_disc_adv_reg.result()})
    # Optional trackers mirror the LOSS_TYP-gated attributes created in __init__.
    if LOSS_TYP[0]:
        self.val_loss_gen_adv_w.update_state(loss_gen_adv_w)
        self.val_loss_gen_adv.update_state(loss_gen_adv)
        metrics_out.update({'loss_gen_adv_w': self.val_loss_gen_adv_w.result(), 'loss_gen_adv': self.val_loss_gen_adv.result()})
    if LOSS_TYP[1]:
        self.val_loss_disc_adv_w.update_state(loss_disc_adv_w)
        self.val_loss_disc_adv.update_state(loss_disc_adv)
        metrics_out.update({'loss_disc_adv_w': self.val_loss_disc_adv_w.result(), 'loss_disc_adv': self.val_loss_disc_adv.result()})
    if LOSS_TYP[2]:
        self.val_loss_l1_w.update_state(loss_gen_pw_dict['loss_l1_w'])
        self.val_loss_l1.update_state(loss_gen_pw_dict['loss_l1'])
        metrics_out.update({'loss_l1_w': self.val_loss_l1_w.result(), 'loss_l1': self.val_loss_l1.result()})
    if LOSS_TYP[3]:
        self.val_loss_l2_w.update_state(loss_gen_pw_dict['loss_l2_w'])
        self.val_loss_l2.update_state(loss_gen_pw_dict['loss_l2'])
        metrics_out.update({'loss_l2_w': self.val_loss_l2_w.result(), 'loss_l2': self.val_loss_l2.result()})
    if LOSS_TYP[4]:
        self.val_loss_l1_mlwt_w.update_state(loss_gen_pw_dict['loss_l1_mlwt_w'])
        self.val_loss_l1_mlwt.update_state(loss_gen_pw_dict['loss_l1_mlwt'])
        metrics_out.update({'loss_l1_mlwt_w': self.val_loss_l1_mlwt_w.result(), 'loss_l1_mlwt': self.val_loss_l1_mlwt.result()})
    if LOSS_TYP[5]:
        self.val_loss_l1_fft_w.update_state(loss_gen_pw_dict['loss_l1_fft_w'])
        self.val_loss_l1_fft.update_state(loss_gen_pw_dict['loss_l1_fft'])
        metrics_out.update({'loss_l1_fft_w': self.val_loss_l1_fft_w.result(), 'loss_l1_fft': self.val_loss_l1_fft.result()})
    if LOSS_TYP[6]:
        self.val_loss_l1_mean_w.update_state(loss_gen_pw_dict['loss_l1_mean_w'])
        self.val_loss_l1_mean.update_state(loss_gen_pw_dict['loss_l1_mean'])
        metrics_out.update({'loss_l1_mean_w': self.val_loss_l1_mean_w.result(), 'loss_l1_mean': self.val_loss_l1_mean.result()})
    if LOSS_TYP[7]:
        self.val_loss_l1_std_w.update_state(loss_gen_pw_dict['loss_l1_std_w'])
        self.val_loss_l1_std.update_state(loss_gen_pw_dict['loss_l1_std'])
        metrics_out.update({'loss_l1_std_w': self.val_loss_l1_std_w.result(), 'loss_l1_std': self.val_loss_l1_std.result()})
    return metrics_out
@property
def metrics(self):
    """Metric trackers managed by Keras (reset between epochs).

    The base trackers are always present; each LOSS_TYP flag contributes
    its (weighted, raw, val-weighted, val-raw) quadruple when enabled.
    """
    tracked = [self.met_l1, self.val_met_l1,
               self.loss_pw_w, self.val_loss_pw_w,
               self.loss_gen_w, self.val_loss_gen_w,
               self.loss_gen_reg, self.val_loss_gen_reg,
               self.loss_disc_adv_reg, self.val_loss_disc_adv_reg]
    # LOSS_TYP index -> loss-name suffix, in the same order as __init__.
    optional = ('gen_adv', 'disc_adv', 'l1', 'l2',
                'l1_mlwt', 'l1_fft', 'l1_mean', 'l1_std')
    for enabled, name in zip(LOSS_TYP, optional):
        if enabled:
            tracked.extend([getattr(self, 'loss_%s_w' % name),
                            getattr(self, 'loss_%s' % name),
                            getattr(self, 'val_loss_%s_w' % name),
                            getattr(self, 'val_loss_%s' % name)])
    return tracked
else:
class My_model(tf.keras.Model):
    """Generator-only variant of the restoration model (non-GAN branch).

    Wraps a single generator network; tracks the pixel-wise losses selected
    by the module-level LOSS_TYP flags, each mirrored by a `val_` tracker
    used from test_step.
    """
    def __init__(self, input_shape, *args, **kwargs):
        """Create the generator and the Mean trackers for every enabled loss."""
        super(My_model, self).__init__(*args, **kwargs)
        self.gen = nn_model_gen(input_shape, 'input_gen', 'gen_', 'nEM_model_rest')
        # train/validation trackers share the same display name so logs align
        self.met_l1 = tf.keras.metrics.Mean(name="met_l1")
        self.val_met_l1= tf.keras.metrics.Mean(name="met_l1")
        self.loss_pw_w = tf.keras.metrics.Mean(name="loss_pw_w")
        self.val_loss_pw_w = tf.keras.metrics.Mean(name="loss_pw_w")
        self.loss_gen_reg = tf.keras.metrics.Mean(name="loss_gen_reg")
        self.val_loss_gen_reg = tf.keras.metrics.Mean(name="loss_gen_reg")
        if LOSS_TYP[2]:
            self.loss_l1_w = tf.keras.metrics.Mean(name="loss_l1_w")
            self.loss_l1 = tf.keras.metrics.Mean(name="loss_l1")
            self.val_loss_l1_w = tf.keras.metrics.Mean(name="loss_l1_w")
            self.val_loss_l1 = tf.keras.metrics.Mean(name="loss_l1")
        if LOSS_TYP[3]:
            self.loss_l2_w = tf.keras.metrics.Mean(name="loss_l2_w")
            self.loss_l2 = tf.keras.metrics.Mean(name="loss_l2")
            self.val_loss_l2_w = tf.keras.metrics.Mean(name="loss_l2_w")
            self.val_loss_l2 = tf.keras.metrics.Mean(name="loss_l2")
        if LOSS_TYP[4]:
            self.loss_l1_mlwt_w = tf.keras.metrics.Mean(name="loss_l1_mlwt_w")
            self.loss_l1_mlwt = tf.keras.metrics.Mean(name="loss_l1_mlwt")
            self.val_loss_l1_mlwt_w = tf.keras.metrics.Mean(name="loss_l1_mlwt_w")
            self.val_loss_l1_mlwt = tf.keras.metrics.Mean(name="loss_l1_mlwt")
        if LOSS_TYP[5]:
            self.loss_l1_fft_w = tf.keras.metrics.Mean(name="loss_l1_fft_w")
            self.loss_l1_fft = tf.keras.metrics.Mean(name="loss_l1_fft")
            self.val_loss_l1_fft_w = tf.keras.metrics.Mean(name="loss_l1_fft_w")
            self.val_loss_l1_fft = tf.keras.metrics.Mean(name="loss_l1_fft")
        if LOSS_TYP[6]:
            self.loss_l1_mean_w = tf.keras.metrics.Mean(name="loss_l1_mean_w")
            self.loss_l1_mean = tf.keras.metrics.Mean(name="loss_l1_mean")
            self.val_loss_l1_mean_w = tf.keras.metrics.Mean(name="loss_l1_mean_w")
            self.val_loss_l1_mean = tf.keras.metrics.Mean(name="loss_l1_mean")
        if LOSS_TYP[7]:
            self.loss_l1_std_w = tf.keras.metrics.Mean(name="loss_l1_std_w")
            self.loss_l1_std = tf.keras.metrics.Mean(name="loss_l1_std")
            self.val_loss_l1_std_w = tf.keras.metrics.Mean(name="loss_l1_std_w")
            self.val_loss_l1_std = tf.keras.metrics.Mean(name="loss_l1_std")
        # global batch counter fed to fcn_x; ibg_m is two epochs worth of batches
        self.ibg = tf.Variable(0.0, dtype=tf.float32, trainable=False)
        self.ibg_m = 2*nvgb.TRAIN_N_DATA//nvgb.BATCH_SIZE
    def compile(self, parm):
        """Build the generator optimizer and its LR schedule from *parm*.

        NOTE(review): overrides Model.compile with an incompatible signature.
        """
        super(My_model, self).compile()
        self.gen_opt = nfop.fcn_get_optimizer_from_vgb()
        self.lr_schedule_gen = nfcb.Cb_Lr_schedule_base(
            lr_min=parm['lr_min'],
            lr_max=parm['opt_lr'],
            m_min=parm['m_0'],
            m_max=parm['opt_m'],
            decay_steps=parm['decay_steps'],
            decay_rate=parm['decay_rate'],
            steps_per_cycle=parm['decay_steps'],
            warmup_steps=parm['warmup_steps'],
            cooldown_steps=parm['cooldown_steps'],
            lr_0=parm['lr_0'],
            warmup_steps_0=parm['warmup_steps_0'],
            cooldown_steps_0=parm['cooldown_steps_0'],
            decay_steps_0=parm['decay_steps_0'],
            lrs_m_pow=parm['lrs_m_pow'],
            lrs_lr_pow=parm['lrs_lr_pow'])
    def reset_opt_iter(self):
        """Zero the optimizer iteration counter (schedule restart)."""
        tf.keras.backend.set_value(self.gen_opt.iterations, 0)
    def set_opt_lr_m(self):
        """Push the scheduled learning rate / momentum into the optimizer."""
        self.lr_schedule_gen.get_set_opt_lr_m(self.gen_opt)
    def inc_opt_counter(self):
        """Advance the LR schedule by one step."""
        self.lr_schedule_gen.inc_counter()
    def call(self, inputs, training=None, mask=None):
        """Forward pass through the generator (mask is ignored)."""
        return self.gen(inputs, training)
    @tf.function
    def train_step(self, data):
        """One generator optimization step (pixel-wise + regularization losses)."""
        x, y = data
        # fcn_x may alter the input as a function of the batch counter ibg
        x = fcn_x(x, y, self.ibg, self.ibg_m)
        with tf.GradientTape() as tape_gen:
            # Forward pass
            y_p = self.gen(x, training=True)
            # pixelwise loss
            loss_gen_pw_dict = fls_pw_w(y, y_p)
            # regularization loss
            loss_gen_reg = tf.reduce_sum(self.gen.losses)
            loss_gen_t = loss_gen_reg + loss_gen_pw_dict['loss_pw_w']
        # Compute gradient
        grad_gen_t = tape_gen.gradient(loss_gen_t, self.gen.trainable_variables)
        #clip gradients
        grad_gen_t = nfop.fcn_optimizer_clip_gradients(self.gen_opt, grad_gen_t)
        # Update gradient
        self.gen_opt.apply_gradients(zip(grad_gen_t, self.gen.trainable_variables))
        # save metrics
        self.met_l1.update_state(loss_gen_pw_dict['met_l1'])
        metrics_out = {'met_l1': self.met_l1.result()}
        # save losses
        self.loss_pw_w.update_state(loss_gen_pw_dict['loss_pw_w'])
        metrics_out.update({'loss_pw_w': self.loss_pw_w.result()})
        self.loss_gen_reg.update_state(loss_gen_reg)
        metrics_out.update({'loss_gen_reg': self.loss_gen_reg.result()})
        if LOSS_TYP[2]:
            self.loss_l1_w.update_state(loss_gen_pw_dict['loss_l1_w'])
            self.loss_l1.update_state(loss_gen_pw_dict['loss_l1'])
            metrics_out.update({'loss_l1_w': self.loss_l1_w.result(), 'loss_l1': self.loss_l1.result()})
        if LOSS_TYP[3]:
            self.loss_l2_w.update_state(loss_gen_pw_dict['loss_l2_w'])
            self.loss_l2.update_state(loss_gen_pw_dict['loss_l2'])
            metrics_out.update({'loss_l2_w': self.loss_l2_w.result(), 'loss_l2': self.loss_l2.result()})
        if LOSS_TYP[4]:
            self.loss_l1_mlwt_w.update_state(loss_gen_pw_dict['loss_l1_mlwt_w'])
            self.loss_l1_mlwt.update_state(loss_gen_pw_dict['loss_l1_mlwt'])
            metrics_out.update({'loss_l1_mlwt_w': self.loss_l1_mlwt_w.result(), 'loss_l1_mlwt': self.loss_l1_mlwt.result()})
        if LOSS_TYP[5]:
            self.loss_l1_fft_w.update_state(loss_gen_pw_dict['loss_l1_fft_w'])
            self.loss_l1_fft.update_state(loss_gen_pw_dict['loss_l1_fft'])
            metrics_out.update({'loss_l1_fft_w': self.loss_l1_fft_w.result(), 'loss_l1_fft': self.loss_l1_fft.result()})
        if LOSS_TYP[6]:
            self.loss_l1_mean_w.update_state(loss_gen_pw_dict['loss_l1_mean_w'])
            self.loss_l1_mean.update_state(loss_gen_pw_dict['loss_l1_mean'])
            metrics_out.update({'loss_l1_mean_w': self.loss_l1_mean_w.result(), 'loss_l1_mean': self.loss_l1_mean.result()})
        if LOSS_TYP[7]:
            self.loss_l1_std_w.update_state(loss_gen_pw_dict['loss_l1_std_w'])
            self.loss_l1_std.update_state(loss_gen_pw_dict['loss_l1_std'])
            metrics_out.update({'loss_l1_std_w': self.loss_l1_std_w.result(), 'loss_l1_std': self.loss_l1_std.result()})
        # advance the batch counter consumed by fcn_x
        self.ibg.assign_add(1.0)
        return metrics_out
    @tf.function
    def test_step(self, data):
        """Validation step: same losses without gradients or input scheduling."""
        x, y = data
        # Forward pass
        y_p = self.gen(x, training=False)
        # pixelwise loss
        loss_gen_pw_dict = fls_pw_w(y, y_p)
        # regularization loss
        loss_gen_reg = tf.reduce_sum(self.gen.losses)
        # save metrics
        self.val_met_l1.update_state(loss_gen_pw_dict['met_l1'])
        metrics_out = {'met_l1': self.val_met_l1.result()}
        # save losses
        self.val_loss_pw_w.update_state(loss_gen_pw_dict['loss_pw_w'])
        metrics_out.update({'loss_pw_w': self.val_loss_pw_w.result()})
        self.val_loss_gen_reg.update_state(loss_gen_reg)
        metrics_out.update({'loss_gen_reg': self.val_loss_gen_reg.result()})
        if LOSS_TYP[2]:
            self.val_loss_l1_w.update_state(loss_gen_pw_dict['loss_l1_w'])
            self.val_loss_l1.update_state(loss_gen_pw_dict['loss_l1'])
            metrics_out.update({'loss_l1_w': self.val_loss_l1_w.result(), 'loss_l1': self.val_loss_l1.result()})
        if LOSS_TYP[3]:
            self.val_loss_l2_w.update_state(loss_gen_pw_dict['loss_l2_w'])
            self.val_loss_l2.update_state(loss_gen_pw_dict['loss_l2'])
            metrics_out.update({'loss_l2_w': self.val_loss_l2_w.result(), 'loss_l2': self.val_loss_l2.result()})
        if LOSS_TYP[4]:
            self.val_loss_l1_mlwt_w.update_state(loss_gen_pw_dict['loss_l1_mlwt_w'])
            self.val_loss_l1_mlwt.update_state(loss_gen_pw_dict['loss_l1_mlwt'])
            metrics_out.update({'loss_l1_mlwt_w': self.val_loss_l1_mlwt_w.result(), 'loss_l1_mlwt': self.val_loss_l1_mlwt.result()})
        if LOSS_TYP[5]:
            self.val_loss_l1_fft_w.update_state(loss_gen_pw_dict['loss_l1_fft_w'])
            self.val_loss_l1_fft.update_state(loss_gen_pw_dict['loss_l1_fft'])
            metrics_out.update({'loss_l1_fft_w': self.val_loss_l1_fft_w.result(), 'loss_l1_fft': self.val_loss_l1_fft.result()})
        if LOSS_TYP[6]:
            self.val_loss_l1_mean_w.update_state(loss_gen_pw_dict['loss_l1_mean_w'])
            self.val_loss_l1_mean.update_state(loss_gen_pw_dict['loss_l1_mean'])
            metrics_out.update({'loss_l1_mean_w': self.val_loss_l1_mean_w.result(), 'loss_l1_mean': self.val_loss_l1_mean.result()})
        if LOSS_TYP[7]:
            self.val_loss_l1_std_w.update_state(loss_gen_pw_dict['loss_l1_std_w'])
            self.val_loss_l1_std.update_state(loss_gen_pw_dict['loss_l1_std'])
            metrics_out.update({'loss_l1_std_w': self.val_loss_l1_std_w.result(), 'loss_l1_std': self.val_loss_l1_std.result()})
        return metrics_out
    @property
    def metrics(self):
        """Trackers managed by Keras; optional ones gated by LOSS_TYP."""
        metrics_out = [self.met_l1, self.val_met_l1,
                       self.loss_pw_w, self.val_loss_pw_w,
                       self.loss_gen_reg, self.val_loss_gen_reg]
        if LOSS_TYP[2]:
            metrics_out.extend([self.loss_l1_w, self.loss_l1, self.val_loss_l1_w, self.val_loss_l1])
        if LOSS_TYP[3]:
            metrics_out.extend([self.loss_l2_w, self.loss_l2, self.val_loss_l2_w, self.val_loss_l2])
        if LOSS_TYP[4]:
            metrics_out.extend([self.loss_l1_mlwt_w, self.loss_l1_mlwt, self.val_loss_l1_mlwt_w, self.val_loss_l1_mlwt])
        if LOSS_TYP[5]:
            metrics_out.extend([self.loss_l1_fft_w, self.loss_l1_fft, self.val_loss_l1_fft_w, self.val_loss_l1_fft])
        if LOSS_TYP[6]:
            metrics_out.extend([self.loss_l1_mean_w, self.loss_l1_mean, self.val_loss_l1_mean_w, self.val_loss_l1_mean])
        if LOSS_TYP[7]:
            metrics_out.extend([self.loss_l1_std_w, self.loss_l1_std, self.val_loss_l1_std_w, self.val_loss_l1_std])
        return metrics_out
#########################################################################################
####################################### write image #####################################
#########################################################################################
class Cb_Test_Plot(tf.keras.callbacks.Callback):
    """Training callback: periodically writes LR/momentum scalars, a test-image
    comparison figure and weight checkpoints to a TensorBoard log directory.

    All work happens in on_train_batch_begin, gated by the frequency values
    in *parm* and driven by the internal batch counter wi_ibg.
    """
    def __init__(self, parm): # add other arguments to __init__ if you need
        super(Cb_Test_Plot, self).__init__()
        self.wi_log_dir = os.path.join(parm['log_dir'], 'test')
        self.n_spl = parm['test_n_samples']
        self.parm = parm
        # spacing for plotting
        self.wspace = 0.025
        self.vspace = 0.010
        self.wsize_fig = 25.0
        self.file_test_writer = tf.summary.create_file_writer(logdir=self.wi_log_dir, max_queue=1)
        # fixed test batch loaded once; x is kept both as a dataset (for predict)
        # and as a normalized image array (for plotting)
        x, y = self.fcn_load_test_mat(self.parm['test_mat_path'])
        self.wi_ds_x = tf.data.Dataset.from_tensor_slices(x).batch(self.n_spl)
        self.wi_x = self.fcn_norm_x_image(x)
        self.wi_y_sft, self.wi_y_sc = nfge.fcn_get_norm_parm_image(y, [1, 2])
        self.wi_y = y
        # wi_ibg: batch counter; wi_ickp: checkpoint counter
        self.wi_ibg = np.array(0).astype(np.int64)
        self.wi_ickp = np.array(0).astype(np.int64)
    def fcn_load_test_mat(self, path_mat):
        """Load the first n_spl test pairs from a .mat file and preprocess them
        the same way as the training pipeline (resize + scale)."""
        x_mat, y_mat = nfge.fcn_ds_read_x_y_mat(path_mat, 'x', 'y', x_typ_mat, y_typ_mat, x_ndim, y_ndim)
        x_mat = x_mat[..., 0:self.n_spl].copy()
        # move the sample axis from last to first (N, H, W, C)
        x_mat = np.moveaxis(x_mat, 3, 0)
        x = tf.convert_to_tensor(x_mat, x_typ_tf[0])
        y_mat = y_mat[..., 0:self.n_spl].copy()
        y_mat = np.moveaxis(y_mat, 3, 0)
        y = tf.convert_to_tensor(y_mat, y_typ_tf[0])
        x, y = fcn_resize_xy(x, y, False)
        x, y = fcn_scale_inputs_x_y(x, y, [1, 2])
        return x, y
    def fcn_norm_x_image(self, x, axis=[1, 2]):
        """Per-image normalization of the input for display; returns numpy."""
        xn = nfge.fcn_norm_image(x, axis)
        return xn.numpy()
    def fcn_norm_y_image(self, y, axis=[1, 2]):
        """Normalize targets/predictions with the shift/scale computed from the
        reference targets in __init__, so both share the same display range."""
        yn = nfge.fcn_norm_image(y, axis, self.wi_y_sft, self.wi_y_sc)
        return yn.numpy()
    def fcn_xy_image_gen(self, y_t, y_p):
        """Build a 4-row comparison figure (input / prediction / target / diff)
        with one column per test sample, and return it as a summary image."""
        x = self.wi_x
        if type(x) is not np.ndarray:
            x = x.numpy()
        if type(y_t) is not np.ndarray:
            y_t = y_t.numpy()
        if type(y_p) is not np.ndarray:
            y_p = y_p.numpy()
        yn = self.fcn_norm_y_image(y_t)
        yn_p = self.fcn_norm_y_image(y_p)
        rows = 4
        hsize_fig = (self.wsize_fig-(self.n_spl-1)*self.wspace)*rows/self.n_spl
        figure = plt.figure(figsize=(self.wsize_fig, hsize_fig))
        for ik in range(self.n_spl):
            x_ik = x[ik, ...].squeeze()
            dy = y_t[ik, ...].squeeze() - y_p[ik, ...].squeeze()
            # mean absolute error shown as the column title
            ee = np.mean(np.fabs(dy))
            # min-max normalize the difference image for display
            dyn = dy - np.min(dy)
            dyn = dyn/np.max(dyn)
            y_ik = yn[ik, ...].squeeze()
            y_p_ik = yn_p[ik, ...].squeeze()
            for iy in range(rows):
                # row order: input, prediction, target, normalized difference
                im_x_ik = x_ik if iy==0 else y_p_ik if iy==1 else y_ik if iy==2 else dyn
                ax = plt.subplot(4, self.n_spl, iy*self.n_spl + ik + 1)
                ax.imshow(im_x_ik)
                ax.set_xticks([])
                ax.set_yticks([])
                ax.grid(False)
                if iy==0:
                    title = 'e = {:4.3f}'.format(ee)
                    ax.set_title(title, fontsize=14)
        figure.subplots_adjust(hspace=self.vspace, wspace=self.wspace)
        figure.tight_layout()
        return nfge.fcn_plot_to_image(figure)
    def on_train_begin(self, logs=None):
        """Restart the optimizer iteration counters at the beginning of fit."""
        self.model.reset_opt_iter()
    def on_train_batch_begin(self, batch, logs=None):
        """Apply the LR schedule, then emit logs/images/checkpoints on their
        configured frequencies and advance the internal counters."""
        self.model.set_opt_lr_m()
        # saving learning rate and momentum
        if self.wi_ibg % self.parm['log_update_freq'] == 0:
            with self.file_test_writer.as_default():
                lr, m = nfop.fcn_read_opt_lr(self.model.gen_opt), nfop.fcn_read_opt_m(self.model.gen_opt)
                tf.summary.scalar(name='batch_gen_lr', data=lr, step=self.wi_ibg)
                tf.summary.scalar(name='batch_gen_m', data=m, step=self.wi_ibg)
                if GAN_TYP == 1:
                    lr, m = nfop.fcn_read_opt_lr(self.model.disc_opt), nfop.fcn_read_opt_m(self.model.disc_opt)
                    tf.summary.scalar(name='batch_disc_lr', data=lr, step=self.wi_ibg)
                    tf.summary.scalar(name='batch_disc_m', data=m, step=self.wi_ibg)
        # saving test data
        if self.wi_ibg % self.parm['test_update_freq'] == 0:
            y_p = self.model.predict(self.wi_ds_x)
            with self.file_test_writer.as_default():
                tf.summary.image(name='data', data=self.fcn_xy_image_gen(self.wi_y, y_p), step=self.wi_ibg)
        # saving weights
        if self.wi_ibg % self.parm['log_checkpoint_freq'] == 0:
            self.model.gen.save_weights(self.parm['ckp_gen_path'].format(self.wi_ickp))
            if GAN_TYP == 1:
                self.model.disc.save_weights(self.parm['ckp_disc_path'].format(self.wi_ickp))
            self.wi_ickp += 1
        # reset metrics
        if (self.wi_ibg > 0) and (self.wi_ibg % self.parm['reset_metric_freq'] == 0):
            self.model.reset_metrics()
        self.wi_ibg += 1
        self.model.inc_opt_counter()
#########################################################################################
#########################################################################################
#########################################################################################
def fcn_load_weights(model, path_dir, bb_load_disc=True, by_name=True):
    """Restore the newest generator (and optionally discriminator) .h5 weights
    found in *path_dir*; returns the model unchanged when the directory is missing."""
    if not os.path.exists(path_dir):
        return model
    weight_files = nfge.fcn_read_files(path_dir, ".h5")
    # lexicographic sort -> last element is the most recent checkpoint
    gen_files = sorted(s for s in weight_files if "gen" in s)
    if gen_files:
        model.gen = nfge.fcn_load_weights(model.gen, gen_files[-1], by_name=by_name, skip_mismatch=False)
    if bb_load_disc:
        disc_files = sorted(s for s in weight_files if "disc" in s)
        if disc_files and (GAN_TYP == 1):
            model.disc = nfge.fcn_load_weights(model.disc, disc_files[-1], by_name=by_name, skip_mismatch=False)
    return model
def fcn_compile_and_fit(parm, bb_print_model_summary=False):
    """Build (optionally under a multi-GPU strategy), compile and fit the model.

    parm: fully populated parameter dict (see fcn_run / fcn_init).
    bb_print_model_summary: when True, print the sub-model summaries.
    """
    def fcn_compile_strategy(parm, bb_print_model_summary):
        """Create the model, restore weights and compile it; must run inside
        the distribution-strategy scope when more than one GPU is used."""
        # set optimizer
        model = My_model(parm['input_shape'])
        if parm['bb_parm_search']:
            # save weights
            if not os.path.exists(parm['grid_search_dir']):
                os.makedirs(parm['grid_search_dir'])
            # load initial weights
            # NOTE(review): nfge.fcn_load_weights is given bb_load_disc here while
            # the else-branch uses the module-local fcn_load_weights with the same
            # arguments -- confirm this branch did not intend the local helper.
            model = nfge.fcn_load_weights(model, parm['weigths_load_path'], bb_load_disc=True, by_name=True)
            # save weights
            model.save_weights(parm['weigths_search_path'])
        else:
            model = fcn_load_weights(model, parm['weigths_load_path'], bb_load_disc=True, by_name=True)
        # reset ibg value
        model.ibg.assign(0.0)
        # print summary
        if bb_print_model_summary:
            print(model.gen.summary())
            if GAN_TYP == 1:
                print(model.disc.summary())
        # print training information
        nvgb.fcn_print_training_parm(parm)
        # compile
        model.compile(parm)
        return model
    # clear session
    tf.keras.backend.clear_session()
    # generate model
    if nvgb.N_GPUS == 1:
        model = fcn_compile_strategy(parm, bb_print_model_summary)
    else:
        mirrored_strategy = tf.distribute.MirroredStrategy(cross_device_ops = tf.distribute.ReductionToOneDevice())
        with mirrored_strategy.scope():
            model = fcn_compile_strategy(parm, bb_print_model_summary)
    # set datasets
    train_dataset, val_dataset = nfge.fcn_load_datasets(fcn_ds_map, parm)
    if parm['bb_parm_search']:
        # hyper-parameter search runs without validation
        val_dataset = None
        parm['validation_steps'] = None
        parm['validation_freq'] = 1
    #fit
    model.fit(train_dataset,
              epochs=parm['epochs'],
              steps_per_epoch=parm['train_steps'],
              callbacks=parm['callbacks'],
              validation_data=val_dataset,
              validation_steps=parm['validation_steps'],
              validation_freq=parm['validation_freq'],
              initial_epoch=parm['initial_epoch'],
              verbose=parm['verbose_fit'])
#########################################################################################
#################################### initialization #####################################
#########################################################################################
def fcn_init(bb_parm_search=False):
    """Parse CLI arguments, select the dataset for the requested model id and
    build the global parameter dictionary consumed by fcn_compile_and_fit."""
    # parse arguments
    parser = nfge.fcn_parse_gen_input_parm()
    root = '/media/ssd_1'
    # dataset id is encoded in the hundreds digit of the model version
    dat_id = vars(parser.parse_args())['modv'] // 100
    # 0: gan - gen
    # 1: gan - disc
    # 2: L1
    # 3: L2
    # 4: L1 - MLWT
    # 5: L1 - FFT
    # 6: mean
    # 7: std
    if dat_id == 1:
        dir_db = 'dataset_hrsem'
    elif dat_id == 2:
        dir_db = 'dataset_lrsem'
    elif dat_id == 3:
        dir_db = 'dataset_hrstem'
    elif dat_id == 4:
        dir_db = 'dataset_lrstem'
    elif dat_id == 5:
        dir_db = 'dataset_hrtem'
    elif dat_id == 6:
        dir_db = 'dataset_lrtem'
    # NOTE(review): no else branch -- any other dat_id leaves dir_db unbound and
    # raises NameError below; confirm the valid 'modv' range is enforced upstream.
    parm = nfge.fcn_init(parser,
                         db_dir=dir_db,
                         opt_nesterov=False,
                         opt_beta_2=0.999,
                         opt_eps=1.0e-5,
                         opt_ctyp=0, # 0: None, 1: clip by value, 2: clip by norm
                         opt_cval=2.0, # 0: 4.0, 1: 8.0
                         norm_trainable=True,
                         norm_eps=1e-3,
                         norm_pos=1, # 1: before layer, 2: after layer
                         norm_reg_typ=0,
                         norm_reg_gamma=0,
                         norm_reg_beta=1e-8,
                         norm_cstr_typ=0,
                         norm_cstr_v0=0.01,
                         norm_cstr_ve=8.0,
                         norm_cstr_rt=0.01,
                         norm_cstr_ax=[0],
                         norm_renorm=False,
                         norm_renorm_m=0.99,
                         dp_spt=False, # use spatial dropout
                         n_classes=1,
                         res_sfr=1.0,
                         dsn_typ=3, # 1: dense, 2: dense bottleneck, 3: residual dense, 4: residual dense bottleneck, 5: down/up residual dense, 6: down/up residual dense bottleneck
                         dsn_compr=0.5,
                         dsn_in_fr=2,
                         dsn_btn_fr=4,
                         dsn_fsl_act=None,
                         bb_parm_search=bb_parm_search,
                         root=root,
                         fn_ext=None,
                         gpu_memory_limit=None)
    # network input/output shapes after the DS_FTR downsampling factor
    parm['input_shape'] = (nvgb.X_SHAPE[0]//DS_FTR, nvgb.X_SHAPE[1]//DS_FTR, nvgb.X_SHAPE[2])
    parm['output_shape'] = (nvgb.Y_SHAPE[0]//DS_FTR, nvgb.Y_SHAPE[1]//DS_FTR, nvgb.Y_SHAPE[2])
    # NOTE(review): x_shape takes its channel count from Y_SHAPE[2] -- looks
    # deliberate (inputs matched to target channels) but verify against callers.
    parm['x_shape'] = (nvgb.X_SHAPE[0], nvgb.X_SHAPE[1], nvgb.Y_SHAPE[2])
    parm['y_shape'] = (nvgb.Y_SHAPE[0], nvgb.Y_SHAPE[1], nvgb.Y_SHAPE[2])
    parm['x_typ_mat'] = x_typ_mat
    parm['y_typ_mat'] = y_typ_mat
    parm['x_typ_rtf'] = x_typ_rtf
    parm['y_typ_rtf'] = y_typ_rtf
    parm['x_typ_tf'] = x_typ_tf
    parm['y_typ_tf'] = y_typ_tf
    parm['ckp_default'] = False
    # checkpoints are written next to the default checkpoint path
    log_dir_root = os.path.split(parm['checkpoint_path'])[0]
    parm['ckp_gen_path'] = os.path.join(log_dir_root, 'gen-{:04d}.h5')
    parm['ckp_disc_path'] = os.path.join(log_dir_root, 'disc-{:04d}.h5')
    return parm
return parm | 52,012 | 34.215301 | 204 | py |
r_em | r_em-master/training/nn_training.py | #-*- coding: utf-8 -*-
"""
Created on Thu Sep 26 14:31:55 2019
@author: Ivan
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import numpy as np
import sys
#########################################################################################
import tensorflow as tf
#########################################################################################
import nn_var_glob as nvgb
import nn_fcns_gen as nfge
import nn_fcns_callbacks as nfcb
#########################################################################################
import training.nn_fcns_local as nflc
#########################################################################################
def fcn_run(parm):
    """Derive the training schedule from the global dataset/batch configuration,
    fill *parm* with it, build the callbacks and launch training.

    Fixes: parm['lrs_lr_pow'] previously received lrs_m_pow instead of the
    computed lrs_lr_pow; two dead (immediately overwritten) assignments to
    warmup_steps_0 / cooldown_steps_0 were removed.
    """
    train_steps = nvgb.TRAIN_N_DATA//nvgb.BATCH_SIZE
    validation_steps = nvgb.VAL_N_DATA//nvgb.BATCH_SIZE
    #########################################################################################
    decay_every_epochs = 1.0
    epochs = round(decay_every_epochs*1000)
    decay_steps = round(decay_every_epochs*train_steps)
    decay_rate = 0.90
    #########################################################################################
    lr_min = 1e-4*parm['opt_lr']
    lr_0 = 1e-4*parm['opt_lr']
    m_0 = 0.0
    lrs_min = 1e-4
    lrs_max = 1e-1
    shuffle_buffer_size = np.minimum(train_steps, 32*nvgb.BATCH_SIZE) # number of elements
    prefetch_buffer_size = nvgb.N_GPUS*np.maximum(1, shuffle_buffer_size//nvgb.BATCH_SIZE) # number of batches (after batch)
    num_parallel_calls = np.minimum(2, nvgb.N_GPUS*2)
    log_update_freq = 32
    log_checkpoint_freq = np.maximum(1024, train_steps//2)
    print_update_freq = np.minimum(1024, 1*train_steps//8)
    test_update_freq = np.maximum(log_update_freq, log_update_freq*(train_steps//(1*log_update_freq)))
    test_n_samples = 8
    validation_freq = 1
    histogram_freq = 1
    reset_metric_freq = log_update_freq
    #########################################################################################
    parm['weigths_load_path'] = ''  # (sic) key name matches fcn_compile_and_fit
    #########################################################################################
    # warmup/cooldown of the first schedule cycle
    warmup_steps_0 = np.minimum(256*2, train_steps//2)
    cooldown_steps_0 = np.minimum(np.maximum(0, decay_steps-warmup_steps_0-32), 0*decay_steps//1)
    decay_every_epochs_0 = 3*decay_every_epochs
    decay_steps_0 = round(decay_every_epochs_0*train_steps)
    warmup_steps = 0
    cooldown_steps = np.minimum(decay_steps-warmup_steps-32, train_steps//64)
    lrs_m_pow = 1.0
    lrs_lr_pow = 1.0
    #########################################################################################
    parm.update({
        'epochs': epochs,
        'lr_min': lr_min,
        'm_0': m_0,
        'lr_0': lr_0,
        'lrs_min': lrs_min,
        'lrs_max': lrs_max,
        'decay_steps': decay_steps,
        'decay_rate': decay_rate,
        'train_steps': train_steps,
        'validation_steps': validation_steps,
        'warmup_steps_0': warmup_steps_0,
        'cooldown_steps_0': cooldown_steps_0,
        'decay_steps_0': decay_steps_0,
        'warmup_steps': warmup_steps,
        'cooldown_steps': cooldown_steps,
        'lrs_m_pow': lrs_m_pow,
        'lrs_lr_pow': lrs_lr_pow,  # bugfix: was lrs_m_pow
        'shuffle_buffer_size': shuffle_buffer_size,
        'prefetch_buffer_size': prefetch_buffer_size,
        'num_parallel_calls': num_parallel_calls,
        'log_checkpoint_freq': log_checkpoint_freq,
        'log_update_freq': log_update_freq,
        'print_update_freq': print_update_freq,
        'test_update_freq': test_update_freq,
        'test_n_samples': test_n_samples,
        'validation_freq': validation_freq,
        'histogram_freq': histogram_freq,
        'reset_metric_freq': reset_metric_freq,
        'write_grads': False,
        'initial_epoch': 0,
        'verbose_fit': 2
    })
    #################################### callbacks ##########################################
    parm['callbacks'] = nfcb.fcn_callbacks_train(parm, nflc)
    #########################################################################################
    nflc.fcn_compile_and_fit(parm, bb_print_model_summary=True)
if __name__ == '__main__':
    # Script entry point: build the run configuration and start training.
    fcn_run(nflc.fcn_init(bb_parm_search=False))
r_em | r_em-master/tk_r_em/version.py | __version__ = '1.0.4'
# Package metadata, presumably consumed by the packaging/setup script.
# NOTE(review): rebinding the module's __name__ is unusual and changes what
# introspection of this module reports -- confirm the build actually needs it.
__name__ = 'tk_r_em'
__description__ = 'Deep convolutional neural networks to restore single-shot electron microscopy images'
__author__ = 'Ivan Lobato'
__author_email__='ivanlh20@gmail.com'
__url__ = 'https://github.com/Ivanlh20/r_em/'
__credits__ = 'University of Antwerp'
__license__ = 'GPLv3'
r_em | r_em-master/tk_r_em/__init__.py | from .tk_r_em import load_network, load_sim_test_data, load_hrstem_exp_test_data | 80 | 80 | 80 | py |
r_em | r_em-master/tk_r_em/tk_r_em.py | """
r_em network suites designed to restore different modalities of electron microscopy data
Author: Ivan Lobato
Email: Ivanlh20@gmail.com
"""
import os
import pathlib
from typing import Tuple
import h5py
import numpy as np
import tensorflow as tf
def expand_dimensions(x):
    """Promote *x* toward a 4-D (batch, height, width, channel) array.

    A 2-D image gains both batch and channel axes; a 3-D input whose trailing
    dimension is not 1 is treated as a stack of 2-D images and gains a channel
    axis; anything else (including a trailing singleton) is returned unchanged.
    """
    if x.ndim == 2:
        # single image -> add batch and channel axes at once
        return np.expand_dims(x, axis=(0, 3))
    needs_channel = x.ndim == 3 and x.shape[-1] != 1
    return np.expand_dims(x, axis=3) if needs_channel else x
def add_extra_row_or_column(x):
    """Pad odd spatial dimensions of a (batch, h, w, c) array to even size.

    The padding row/column is filled with each image's mean over the spatial
    axes, recomputed after any previous padding step.
    """
    for axis in (1, 2):
        if x.shape[axis] % 2 == 1:
            other = 3 - axis  # the spatial axis we tile along
            fill = x.mean(axis=(1, 2), keepdims=True)
            reps = [1, 1, 1, 1]
            reps[other] = x.shape[other]
            x = np.concatenate((x, np.tile(fill, reps)), axis=axis)
    return x
def add_extra_row_or_column_patch_based(x):
    """Append a mean-valued row and/or column to a 2-d image so both sizes are even.

    The fill value is the global mean of the image, recomputed after the row is
    appended (so the extra column averages over the padded image).
    """
    if x.shape[0] % 2 == 1:
        pad = np.repeat(x.mean(axis=(0, 1), keepdims=True), x.shape[1], axis=1)
        x = np.concatenate((x, pad), axis=0)
    if x.shape[1] % 2 == 1:
        pad = np.repeat(x.mean(axis=(0, 1), keepdims=True), x.shape[0], axis=0)
        x = np.concatenate((x, pad), axis=1)
    return x
def remove_extra_row_or_column(x, x_i_sh):
    """Crop a padded (batch, H, W, C) array back to the original shape ``x_i_sh``.

    Returns ``x`` itself when no padding was added.
    """
    return x if x.shape == x_i_sh else x[:, :x_i_sh[1], :x_i_sh[2], :]
def remove_extra_row_or_column_patch_based(x, x_i_sh):
    """Crop a padded 2-d image back to the original shape ``x_i_sh``.

    Returns ``x`` itself when no padding was added.
    """
    return x if x.shape == x_i_sh else x[:x_i_sh[0], :x_i_sh[1]]
def adjust_output_dimensions(x, x_i_shape):
    """Undo expand_dimensions(): squeeze the 4-d model output back to the caller's rank.

    ``x_i_shape`` is the shape the caller originally passed in; a 3-d input with
    a trailing singleton loses the batch axis, a 3-d input without one loses the
    channel axis, and a 2-d input loses both.
    """
    rank = len(x_i_shape)
    if rank == 2:
        return x.squeeze()
    if rank == 3:
        axis = 0 if x_i_shape[-1] == 1 else -1
        return x.squeeze(axis=axis)
    return x
def get_centered_range(n, patch_size, stride):
    """Return patch-centre coordinates along one axis of length ``n``.

    Centres start half a patch in from the border and advance by ``stride``;
    a final centre is appended so the last patch always touches the far edge.
    """
    half = patch_size // 2
    last = n - half
    if half == last:
        # A single patch covers the whole axis.
        return np.array([half])
    centers = np.arange(half, last, stride)
    if centers[-1] + half < n:
        centers = np.append(centers, last)
    return centers
def get_range(im_shape, patch_size, strides):
    """Yield (row_slice, col_slice) patch windows covering an image of ``im_shape``.

    Windows are centred on the coordinates produced by get_centered_range()
    for each axis and scanned row-major (rows outer, columns inner).
    """
    half_y = patch_size[0] // 2
    half_x = patch_size[1] // 2
    centers_y = get_centered_range(im_shape[0], patch_size[0], strides[0])
    centers_x = get_centered_range(im_shape[1], patch_size[1], strides[1])
    for cy in centers_y:
        for cx in centers_x:
            yield slice(cy - half_y, cy + half_y), slice(cx - half_x, cx + half_x)
def process_prediction(data, x_r, count_map, window, ib, sy, sx):
    """Accumulate the first ``ib`` predicted patches into the mosaic, in place.

    Each patch is weighted by ``window`` before being added to ``x_r``; the
    same weights are added to ``count_map`` so callers can normalize later.
    """
    for patch, rows, cols in zip(data[:ib], sy[:ib], sx[:ib]):
        weighted = patch[..., 0].squeeze() * window
        count_map[rows, cols] += window
        x_r[rows, cols] += weighted
def butterworth_window(shape, cutoff_radius_ftr, order):
    """Separable 2-d Butterworth taper used to blend overlapping patches.

    The window is the outer product of two 1-d low-pass Butterworth profiles
    centred on each axis; it equals 1 at the centre and decays toward the
    borders, faster for higher ``order``.
    """
    assert len(shape) == 2, "Shape must be a tuple of length 2 (height, width)"
    assert 0 < cutoff_radius_ftr <= 0.5, "Cutoff frequency must be in the range (0, 0.5]"

    def axis_window(length):
        # Coordinates centred on zero, matching the original half-open layout.
        coords = np.arange(-length//2, length-length//2)
        return 1 / (1 + (coords / (cutoff_radius_ftr * length)) ** (2 * order))

    return np.outer(axis_window(shape[0]), axis_window(shape[1]))
class Model(tf.keras.Model):
    """Thin wrapper around a saved Keras restoration network.

    Adds shape handling (rank coercion, even-size padding) on top of the
    wrapped model and a patch-based prediction mode for images larger than
    the network's receptive field or available memory.
    """

    def __init__(self, model_path):
        """Load the serialized Keras model stored at ``model_path``."""
        super(Model, self).__init__()
        self.base_model = tf.keras.models.load_model(model_path, compile=False)
        self.base_model.compile()

    def call(self, inputs, training=None, mask=None):
        """Delegate the forward pass to the wrapped model."""
        return self.base_model(inputs, training=training, mask=mask)

    def summary(self):
        """Print the wrapped model's architecture summary."""
        return self.base_model.summary()

    def predict(self, x, batch_size=16, verbose=0, steps=None, callbacks=None, max_queue_size=10, workers=1, use_multiprocessing=False):
        """Restore ``x`` in a single pass.

        ``x`` may be 2-d, 3-d or 4-d; the result has the same rank and
        spatial shape as the input. Extra keyword arguments mirror
        ``tf.keras.Model.predict``.
        """
        x_i_sh = x.shape
        # Coerce to (batch, height, width, channel) and float32 for the network.
        x = expand_dimensions(x)
        x = x.astype(np.float32)
        x_i_sh_e = x.shape
        # The network needs even spatial sizes; pad with the image mean if odd.
        x = add_extra_row_or_column(x)
        batch_size = min(batch_size, x.shape[0])
        x = self.base_model.predict(x, batch_size, verbose, steps, callbacks, max_queue_size, workers, use_multiprocessing)
        # Crop any padding and restore the caller's original rank.
        x = remove_extra_row_or_column(x, x_i_sh_e)
        return adjust_output_dimensions(x, x_i_sh)

    def predict_patch_based(self, x, patch_size=None, stride=None, batch_size=16):
        """Restore a large 2-d image by blending overlapping patch predictions.

        Patches of ``patch_size`` (at least 128, clipped to the image size) are
        predicted in batches and accumulated with a Butterworth taper so that
        overlapping regions blend smoothly; the accumulated weights are divided
        out at the end. Falls back to :meth:`predict` when ``patch_size`` is
        ``None``.
        """
        if patch_size is None:
            return self.predict(x, batch_size=batch_size)
        x = x.squeeze().astype(np.float32)
        x_i_sh_e = x.shape
        # The patch grid needs even spatial sizes; pad with the image mean if odd.
        x = add_extra_row_or_column_patch_based(x)
        patch_size = max(patch_size, 128)
        patch_size = (min(patch_size, x.shape[0]), min(patch_size, x.shape[1]))
        # Cap the stride at half a patch so neighbouring patches overlap.
        overlap = (patch_size[0] // 2, patch_size[1] // 2)
        if stride is None:
            stride = overlap
        else:
            stride = (min(stride, overlap[0]), min(stride, overlap[1]))
        batch_size = max(batch_size, 4)
        data = np.zeros((batch_size, *patch_size, 1), dtype=np.float32)
        # Per-slot placeholders for the window slices of the buffered patches.
        sy = [slice(0) for _ in range(batch_size)]
        sx = [slice(0) for _ in range(batch_size)]
        x_r = np.zeros(x.shape, dtype=np.float32)
        count_map = np.zeros(x.shape, dtype=np.float32)
        window = butterworth_window(patch_size, 0.33, 4)
        ib = 0
        for s_iy, s_ix in get_range(x.shape, patch_size, stride):
            if ib < batch_size:
                data[ib, ..., 0] = x[s_iy, s_ix]
                sy[ib] = s_iy
                sx[ib] = s_ix
                ib += 1
            if ib == batch_size:
                data = self.base_model.predict(data, batch_size=batch_size)
                process_prediction(data, x_r, count_map, window, ib, sy, sx)
                ib = 0
        # Bug fix: flush only when patches remain. The previous
        # ``if ib != batch_size`` also fired on an empty buffer (ib == 0)
        # and predicted a zero-row batch.
        if ib > 0:
            data = self.base_model.predict(data[:ib, ...], batch_size=batch_size)
            process_prediction(data, x_r, count_map, window, ib, sy, sx)
        # Normalize the blended mosaic by the accumulated window weights.
        x_r /= count_map
        # Bug fix: crop the reconstruction itself. Previously the padded
        # *input* was cropped into a dead variable and the padded ``x_r``
        # was returned.
        x_r = remove_extra_row_or_column_patch_based(x_r, x_i_sh_e)
        return x_r
def load_network(model_name: str = 'sfr_hrstem'):
    """Load an r_em neural network model.

    :param model_name: either a path to a model directory, or the name of one
        of the models bundled under ``models/`` next to this file.
    :return: a wrapped ``Model`` instance.
    """
    if os.path.isdir(model_name):
        model_path = pathlib.Path(model_name).resolve()
    else:
        model_path = pathlib.Path(__file__).resolve().parent / 'models' / model_name.lower()
    return Model(model_path)
def load_sim_test_data(file_name: str = 'sfr_hrstem') -> Tuple[np.ndarray, np.ndarray]:
    """Load a simulated (input, target) test pair for an r_em network.

    :param file_name: either a path to an HDF5 file, or the name of a bundled
        file under ``test_data/`` next to this file.
    :return: a tuple ``(x, y)`` of float32 arrays in (batch, H, W, C) order.
    """
    if os.path.isfile(file_name):
        path = pathlib.Path(file_name).resolve()
    else:
        path = pathlib.Path(__file__).resolve().parent / 'test_data' / f'{file_name.lower()}.h5'
    with h5py.File(path, 'r') as h5file:
        # Stored channel-first; transpose to (batch, height, width, channel).
        x = np.asarray(h5file['x'][:], dtype=np.float32).transpose(0, 3, 2, 1)
        y = np.asarray(h5file['y'][:], dtype=np.float32).transpose(0, 3, 2, 1)
    return x, y
def load_hrstem_exp_test_data(file_name: str = 'exp_hrstem') -> np.ndarray:
    """Load experimental HRSTEM test images for an r_em network.

    :param file_name: either a path to an HDF5 file, or the name of a bundled
        file under ``test_data/`` next to this file.
    :return: a single float32 array (not a tuple: the previous
        ``Tuple[np.ndarray, np.ndarray]`` annotation was wrong).
    """
    if os.path.isfile(file_name):
        path = pathlib.Path(file_name).resolve()
    else:
        file_name = file_name.lower()
        path = pathlib.Path(__file__).resolve().parent / 'test_data' / f'{file_name}.h5'
    with h5py.File(path, 'r') as f:
        x = f['x'][:]
        # Stored channel-first; transpose into the layout predict() expects.
        if x.ndim == 4:
            x = np.asarray(x, dtype=np.float32).transpose(0, 3, 2, 1)
        else:
            x = np.asarray(x, dtype=np.float32).transpose(1, 0)
    return x
ramps | ramps-master/examples/simulated-unicycle/simulator.py | #!/usr/bin/python
#
# Simulates an MDP-Strategy
import copy
import itertools
import math
import os
import random
import resource
import signal
import subprocess
import sys
import tempfile

from PIL import Image
import pygame, pygame.locals
# ==================================
# Settings
# ==================================
MAGNIFY = 64
# ==================================
# Entry point
# ==================================
# Command line: simulator.py <workspace.png> [extra arguments forwarded to ramps]
if len(sys.argv)<2:
    print >>sys.stderr, "Error: Need PNG file as parameter"
    sys.exit(1)
specFile = sys.argv[1]
# Everything after the PNG file is passed verbatim to the ramps solver.
rampsParameters = sys.argv[2:]
# ==================================
# Read input image
# ==================================
# The workspace is a palette PNG: one pixel per grid cell, palette index 1 is
# a static obstacle, indices 2..8 are colored regions (see state construction).
pngfile = Image.open(specFile)
pngFileBasis = specFile[0:specFile.rfind(".png")]
# print "Size of Workspace:",pngfile.size
xsize = pngfile.size[0]
ysize = pngfile.size[1]
imageData = pngfile.getdata()
palette = pngfile.getpalette()
# Coordinates are encoded in 10 bits downstream, hence the 1023 limit.
if (xsize>1023):
    print >>sys.stderr,"Error: Scenario is too large - not supported."
    sys.exit(1)
if (ysize>1023):
    print >>sys.stderr,"Error: Scenario is too large - not supported."
    sys.exit(1)
# ==================================
# Read parameter file
# ==================================
# The .params file next to the PNG holds "key = value" lines; '#' starts a
# comment line and blank lines are ignored.
parameterFileName = pngFileBasis+".params"
allParams = {}
for a in open(parameterFileName,"r").readlines():
    a = a.strip()
    if len(a)>0 and a[0]!='#':
        posEqual = a.index("=")
        allParams[a[0:posEqual].strip()] = a[posEqual+1:].strip()
# ==================================
# Parse parameter file
# ==================================
# Number of discrete heading directions of the unicycle.
nofDirections = int(allParams["nofDirections"])
# Initial cell and heading of the robot.
initX = int(allParams["initX"])
initY = int(allParams["initY"])
initDir = int(allParams["initDir"])
# Noise radius added around the displaced cell image (see computeSuccs).
positionUpdateNoise = float(allParams["positionUpdateNoise"])
# Distance travelled per step along the current heading.
unicycleSpeed = float(allParams["unicycleSpeed"])
# Probability that a commanded rotation does not happen.
probabilityDirectionChangeFail = float(allParams["probabilityDirectionChangeFail"])
# ==================================
# Construct MDP --> States
# ==================================
# One MDP state per (cell, heading) triple; the color2..color8 flags mark
# which palette color the cell carries. stateMapper maps the triple to its
# numeric state index used in the .tra/.lab files.
with open(pngFileBasis+".sta","w") as stateFile:
    stateFile.write("(xpos,ypos,direction,color2,color3,color4,color5,color6,color7,color8)\n")
    stateMapper = {}
    for x in xrange(0,xsize):
        for y in xrange(0,ysize):
            for d in xrange(0,nofDirections):
                color = imageData[y*xsize+x]
                stateNum = len(stateMapper)
                stateFile.write(str(stateNum)+":("+str(x)+","+str(y)+","+str(d))
                for c in xrange(2,9):
                    if color==c:
                        stateFile.write(",1")
                    else:
                        stateFile.write(",0")
                stateFile.write(")\n")
                stateMapper[(x,y,d)] = stateNum
    # Add error state (leaving the workspace or hitting an obstacle), keyed (-1,-1,-1).
    errorState = len(stateMapper)
    stateMapper[(-1,-1,-1)] = errorState
    stateFile.write(str(errorState)+":(-1,-1,-1,0,0,0,0,0,0,0)\n")
# ==================================
# Construct MDP --> Label file
# ==================================
# Label file: only the initial (cell, heading) state is labelled "init".
with open(pngFileBasis+".lab","w") as labelFile:
    labelFile.write("0=\"init\" 1=\"deadlock\"\n")
    labelFile.write(str(stateMapper[(initX,initY,initDir)])+": 0\n")
# ==================================
# Construct MDP --> Transition file
# ==================================
# First, a function that computes the possible/likely
# transitions when going from a (x,y)-cell into some
# direction. It computes the image of the complete cell
# and then performs probability-weighting according to
# the areas of overlap
def computeSuccs(xpos,ypos,direction):
    """Successor-cell distribution for moving one step along ``direction``.

    Displaces the four corners of cell (xpos, ypos) by unicycleSpeed along the
    heading, inflates the bounding box by positionUpdateNoise, and weights each
    overlapped cell by its share of the box area. Out-of-bounds or obstacle
    (color 1) mass is collected under the key (-1, -1).

    Returns a dict {(x, y): probability} that sums to 1.
    """
    minX = float(xsize)
    maxX = float(0)
    minY = float(ysize)
    maxY = float(0)
    # Bounding box of the displaced cell corners.
    for (x,y) in [(xpos,ypos),(xpos+1,ypos),(xpos,ypos+1),(xpos+1,ypos+1)]:
        destX = math.sin(direction/float(nofDirections)*2*math.pi)*unicycleSpeed+x
        destY = math.cos(direction/float(nofDirections)*2*math.pi)*unicycleSpeed+y
        minX = min(minX,destX)
        maxX = max(maxX,destX)
        minY = min(minY,destY)
        maxY = max(maxY,destY)
    # Inflate by the position-update noise radius.
    minX -= positionUpdateNoise
    maxX += positionUpdateNoise
    minY -= positionUpdateNoise
    maxY += positionUpdateNoise
    sizeOfImage = (maxX-minX)*(maxY-minY)
    targetCells = {(-1,-1):0.0}
    for x in xrange(int(math.floor(minX)),int(math.ceil(maxX))):
        for y in xrange(int(math.floor(minY)),int(math.ceil(maxY))):
            # Compute area of overlap between cell (x,y) and the bounding box.
            xStart = x
            if x<minX:
                xStart = minX
            xEnd = x+1
            if xEnd>maxX:
                xEnd = maxX
            yStart = y
            if y<minY:
                yStart = minY
            yEnd = y+1
            if yEnd>maxY:
                yEnd = maxY
            thisVolume = (xEnd-xStart)*(yEnd-yStart)
            if (x>=0) and (y>=0) and (x<xsize) and (y<ysize) and imageData[x+y*xsize]!=1: # includes static obstacle (color 1)
                targetCells[(x,y)] = thisVolume/sizeOfImage
            else:
                targetCells[(-1,-1)] += thisVolume/sizeOfImage
    # print "TransitionProb from",xpos,ypos,direction," via ",minX,maxX,minY,maxY,"to:"
    # for (a,b) in targetCells.iteritems():
    #     print a,":",b
    return targetCells
# Iterate over all cells and compute transition probabilities
# For every (cell, heading) state, three choices are emitted:
#   0 = rotate -1, 1 = keep heading, 2 = rotate +1.
# A commanded rotation fails with probabilityDirectionChangeFail, in which
# case the robot moves along its unchanged heading instead.
transitionLines = []
for x in xrange(0,xsize):
    for y in xrange(0,ysize):
        for d in xrange(0,nofDirections):
            # Successors when the heading does not change (shared below).
            edgesNoChange = computeSuccs(x,y,d)
            # Choice 0: Rotate -1 (wraps around the direction count).
            rotMinus1 = d-1
            if rotMinus1 < 0:
                rotMinus1 += nofDirections
            edges = computeSuccs(x,y,rotMinus1)
            for ((a,b),c) in edges.iteritems():
                dPrime = rotMinus1
                if (a==-1):
                    dPrime = -1
                if c>0.0:
                    transitionLines.append([stateMapper[(x,y,d)],0,stateMapper[(a,b,dPrime)],c*(1.0-probabilityDirectionChangeFail)])
            # Choice 0 failure branch: rotation did not happen, heading stays d.
            for ((a,b),c) in edgesNoChange.iteritems():
                dPrime = d
                if (a==-1):
                    dPrime = -1
                if c>0.0 and probabilityDirectionChangeFail>0.0:
                    transitionLines.append([stateMapper[(x,y,d)],0,stateMapper[(a,b,dPrime)],c*probabilityDirectionChangeFail])
            # Choice 1: keep the current heading.
            for ((a,b),c) in edgesNoChange.iteritems():
                dPrime = d
                if (a==-1):
                    dPrime = -1
                if c>0:
                    transitionLines.append([stateMapper[(x,y,d)],1,stateMapper[(a,b,dPrime)],c])
            # Choice 2: Rotate +1 (wraps around the direction count).
            rotPlus1 = d+1
            if rotPlus1 >= nofDirections:
                rotPlus1 -= nofDirections
            edges = computeSuccs(x,y,rotPlus1)
            for ((a,b),c) in edges.iteritems():
                dPrime = rotPlus1
                if (a==-1):
                    dPrime = -1
                if c>0:
                    transitionLines.append([stateMapper[(x,y,d)],2,stateMapper[(a,b,dPrime)],c*(1-probabilityDirectionChangeFail)])
            # Choice 2 failure branch: rotation did not happen, heading stays d.
            for ((a,b),c) in edgesNoChange.iteritems():
                dPrime = d
                if (a==-1):
                    dPrime = -1
                if c>0 and probabilityDirectionChangeFail>0.0:
                    transitionLines.append([stateMapper[(x,y,d)],2,stateMapper[(a,b,dPrime)],c*probabilityDirectionChangeFail])
# Print transitions file: It contains the transitions computed earlier PLUS an error state self loop
with open(pngFileBasis+".tra","w") as transitionFile:
    transitionFile.write(str(len(stateMapper))+" "+str(len(stateMapper)*3-2)+" "+str(len(transitionLines)+1)+"\n")
    for (a,b,c,d) in transitionLines:
        transitionFile.write(str(a)+" "+str(b)+" "+str(c)+" "+str(d)+"\n")
    transitionFile.write(str(errorState)+" 0 "+str(errorState)+" 1.0\n")
# ==================================
# Compute and read strategy/policy
# ==================================
# Re-run the external ramps solver only when no strategy file exists yet or
# the parameters changed after it was written.
# NOTE(review): `os` is used here but never imported at the top of this
# script -- it needs `import os` to run.
if not os.path.exists(pngFileBasis+".strategy") or (os.path.getmtime(pngFileBasis+".params")>os.path.getmtime(pngFileBasis+".strategy")):
    with open(pngFileBasis+".strategy","wb") as out:
        rampsProcess = subprocess.Popen(["../../src/ramps",pngFileBasis]+rampsParameters, bufsize=1048768, stdin=None, stdout=out)
        returncode = rampsProcess.wait()
        if (returncode!=0):
            print >>sys.stderr, "RAMPS returned error code:",returncode
            sys.exit(1)
# Parse the strategy: lines "s d mdpState choice" start a policy state;
# subsequent "-> dest s' d'" lines give the memory update per MDP successor.
policy = {}
currentPolicyState = None
with open(pngFileBasis+".strategy","r") as strat:
    nofPolicyStates = int(strat.readline().strip())
    while True:
        line = strat.readline()
        if line != '':
            if line.startswith("->"):
                line = line[2:].strip().split(" ")
                assert len(line)==3
                policy[currentPolicyState][2][int(line[0])] = (int(line[1]),int(line[2]))
            else:
                line = line.strip().split(" ")
                assert len(line)==4
                currentPolicyState = (int(line[0]),int(line[1]))
                policy[currentPolicyState] = [int(line[2]),int(line[3]),{}]
        else:
            break
# ==================================
# Prepare reverse state mapper and
# Searchable transition list
# ==================================
# Invert stateMapper so a numeric state can be decoded back to (x, y, d).
reverseStateMapper = {}
for (a,b) in stateMapper.iteritems():
    reverseStateMapper[b] = a
# Group transitions by (state, action) for fast sampling during simulation.
transitionLists = {}
for (a,b,c,d) in transitionLines:
    if not (a,b) in transitionLists:
        transitionLists[(a,b)] = [(c,d)]
    else:
        transitionLists[(a,b)].append((c,d))
# =========================================
# Initialize interactive display
# =========================================
pygame.init()
displayInfo = pygame.display.Info()
# Shrink the cell size so the whole workspace fits in 3/4 of the screen.
MAGNIFY = min(MAGNIFY,displayInfo.current_w*3/4/xsize)
MAGNIFY = min(MAGNIFY,displayInfo.current_h*3/4/ysize)
# ==================================
# Main loop
# ==================================
def actionLoop():
    """Interactive pygame visualization of the computed policy.

    Keys: q/ESC quit, SPACE pauses, r resets to the initial policy state,
    +/- change the simulation speed. Each frame draws the workspace, the
    robot (red circle; red border when crashed), then samples the next MDP
    state from the policy's chosen action.
    """
    screen = pygame.display.set_mode(((xsize+2)*MAGNIFY,(ysize+2)*MAGNIFY))
    pygame.display.set_caption('Policy Visualizer')
    clock = pygame.time.Clock()
    screenBuffer = pygame.Surface(screen.get_size())
    screenBuffer = screenBuffer.convert()
    screenBuffer.fill((64, 64, 64)) # Dark Gray
    # Initialize Policy
    policyState = None
    policyData = None
    isPaused = False
    speed = 10
    while 1:
        resetInThisRound = False
        # Process events
        for event in pygame.event.get():
            if event.type == pygame.locals.QUIT or (event.type == pygame.locals.KEYDOWN and event.key in [pygame.locals.K_ESCAPE,pygame.locals.K_q]):
                return
            if (event.type == pygame.locals.KEYDOWN and event.key == pygame.locals.K_SPACE):
                isPaused = not isPaused
            if (event.type == pygame.locals.KEYDOWN and event.key == pygame.locals.K_r):
                resetInThisRound = True
            if (event.type == pygame.locals.KEYDOWN and event.key == pygame.locals.K_PLUS):
                speed += 1
            if (event.type == pygame.locals.KEYDOWN and event.key == pygame.locals.K_MINUS):
                speed = max(speed-1,1)
        # Update: (re)start from policy state 0 on reset or first iteration.
        if resetInThisRound or (policyState==None):
            policyState = 0
            policyData = 0
        # Obtain robot information for drawing
        if (policyState,policyData) in policy:
            (robotX,robotY,direction) = reverseStateMapper[policy[(policyState,policyData)][0]]
        else:
            (robotX,robotY,direction) = (-1,-1,-1) # Crashed
        # Draw Field
        for x in xrange(0,xsize):
            for y in xrange(0,ysize):
                paletteColor = imageData[y*xsize+x]
                color = palette[paletteColor*3:paletteColor*3+3]
                pygame.draw.rect(screenBuffer,color,((x+1)*MAGNIFY,(y+1)*MAGNIFY,MAGNIFY,MAGNIFY),0)
        # Draw boundary (red when the robot has crashed)
        if robotX==-1:
            boundaryColor = (255,0,0)
        else:
            boundaryColor = (64,64,64)
        pygame.draw.rect(screenBuffer,boundaryColor,(0,0,MAGNIFY*(xsize+2),MAGNIFY),0)
        pygame.draw.rect(screenBuffer,boundaryColor,(0,MAGNIFY,MAGNIFY,MAGNIFY*(ysize+1)),0)
        pygame.draw.rect(screenBuffer,boundaryColor,(MAGNIFY*(xsize+1),MAGNIFY,MAGNIFY,MAGNIFY*(ysize+1)),0)
        pygame.draw.rect(screenBuffer,boundaryColor,(MAGNIFY,MAGNIFY*(ysize+1),MAGNIFY*xsize,MAGNIFY),0)
        # pygame.draw.rect(screenBuffer,boundaryColor,(0,0,MAGNIFY*(xsize+2),MAGNIFY),0)
        # Draw "Good" Robot
        if robotX!=-1:
            pygame.draw.circle(screenBuffer, (192,32,32), ((robotX+1)*MAGNIFY+MAGNIFY/2,(robotY+1)*MAGNIFY+MAGNIFY/2) , MAGNIFY/3-2, 0)
            pygame.draw.circle(screenBuffer, (255,255,255), ((robotX+1)*MAGNIFY+MAGNIFY/2,(robotY+1)*MAGNIFY+MAGNIFY/2) , MAGNIFY/3-1, 1)
            pygame.draw.circle(screenBuffer, (0,0,0), ((robotX+1)*MAGNIFY+MAGNIFY/2,(robotY+1)*MAGNIFY+MAGNIFY/2) , MAGNIFY/3, 1)
        # Draw cell frames
        for x in xrange(0,xsize):
            for y in xrange(0,ysize):
                pygame.draw.rect(screenBuffer,(0,0,0),((x+1)*MAGNIFY,(y+1)*MAGNIFY,MAGNIFY,MAGNIFY),1)
        pygame.draw.rect(screenBuffer,(0,0,0),(MAGNIFY-1,MAGNIFY-1,MAGNIFY*xsize+2,MAGNIFY*ysize+2),1)
        # Flip!
        screen.blit(screenBuffer, (0, 0))
        pygame.display.flip()
        # Update state: sample the next MDP state from the cumulative
        # distribution of the policy's chosen (state, action) pair.
        if (not isPaused) and robotX!=-1:
            randomNumber = random.random()
            (mdpstate,decision,dataUpdate) = policy[(policyState,policyData)]
            transitionList = transitionLists[(mdpstate,decision)]
            dest = None
            for (a,b) in transitionList:
                if randomNumber<=b:
                    dest = a
                    randomNumber = 123.0
                else:
                    randomNumber -= b
            # Rounding error? Fall back to the first successor.
            if (dest==None):
                dest = transitionList[0][0]
            # Update memory
            # print policyState
            # print decision
            # print dest
            # print policy[(policyState,policyData)]
            assert dest in policy[(policyState,policyData)][2]
            (policyState,policyData) = dataUpdate[dest]
        # Make the transition
        if not isPaused:
            # Done
            clock.tick(speed)
        else:
            clock.tick(3)
# ==================================
# Call main program
# ==================================
actionLoop()
| 14,622 | 36.494872 | 149 | py |
ramps | ramps-master/examples/two-robots/simulator.py | #!/usr/bin/python
#
# Simulates an MDP-Strategy
import math
import os
import sys
import resource
import subprocess
import signal
import tempfile
import copy
import itertools
import random
from PIL import Image
import pygame, pygame.locals
# ==================================
# Settings
# ==================================
MAGNIFY = 64
# ==================================
# Entry point
# ==================================
# Command line: simulator.py <workspace.png> [extra arguments forwarded to ramps]
if len(sys.argv)<2:
    print >>sys.stderr, "Error: Need PNG file as parameter"
    sys.exit(1)
specFile = sys.argv[1]
# Everything after the PNG file is passed verbatim to the ramps solver.
rampsParameters = sys.argv[2:]
# ==================================
# Read input image
# ==================================
# The workspace is a palette PNG: one pixel per grid cell, palette index 1 is
# a static obstacle; indices 2 and 3 mark pickup and drop cells (see below).
pngfile = Image.open(specFile)
pngFileBasis = specFile[0:specFile.rfind(".png")]
# print "Size of Workspace:",pngfile.size
xsize = pngfile.size[0]
ysize = pngfile.size[1]
imageData = pngfile.getdata()
palette = pngfile.getpalette()
# Coordinates are encoded in 10 bits downstream, hence the 1023 limit.
if (xsize>1023):
    print >>sys.stderr,"Error: Scenario is too large - not supported."
    sys.exit(1)
if (ysize>1023):
    print >>sys.stderr,"Error: Scenario is too large - not supported."
    sys.exit(1)
# ==================================
# Read parameter file
# ==================================
# The .params file next to the PNG holds "key = value" lines; '#' starts a
# comment line and blank lines are ignored.
parameterFileName = pngFileBasis+".params"
allParams = {}
for a in open(parameterFileName,"r").readlines():
    a = a.strip()
    if len(a)>0 and a[0]!='#':
        posEqual = a.index("=")
        allParams[a[0:posEqual].strip()] = a[posEqual+1:].strip()
# ==================================
# Parse parameter file
# ==================================
# Initial cells of robot A and robot B.
initXA = int(allParams["initXA"])
initYA = int(allParams["initYA"])
initXB = int(allParams["initXB"])
initYB = int(allParams["initYB"])
# Probability mass of moving sideways instead of the commanded direction
# (split over the three other directions in computeSuccs).
positionUpdateNoise = float(allParams["positionUpdateNoise"])
# ==================================
# Construct MDP --> States
# ==================================
# One MDP state per pair of distinct, non-obstacle robot cells, plus carry
# flags: carry=1 means the robots hold the freight between them (only possible
# when B sits two cells right of A), carrySuccess=1 marks a completed drop on
# a color-3 cell.
with open(pngFileBasis+".sta","w") as stateFile:
    stateFile.write("(xposA,yposA,xposB,yposB,color2A,color3A,color4A,color5A,color6A,color7A,color8A,color2B,color3B,color4B,color5B,color6B,color7B,color8B,carry,carrySuccess)\n")
    stateMapper = {}
    for xA in xrange(0,xsize):
        for yA in xrange(0,ysize):
            for xB in xrange(0,xsize):
                for yB in xrange(0,ysize):
                    if xA!=xB or yA!=yB:
                        if (imageData[xA+yA*xsize]!=1) and (imageData[xB+yB*xsize]!=1):
                            carryModes = [(0,0)]
                            if xB==xA+2 and yA==yB:
                                carryModes.append((1,0))
                                if (imageData[xA+1+yA*xsize]==3):
                                    carryModes.append((0,1))
                            for (a,b) in carryModes:
                                colorA = imageData[yA*xsize+xA]
                                stateNum = len(stateMapper)
                                stateFile.write(str(stateNum)+":("+str(xA)+","+str(yA)+","+str(xB)+","+str(yB))
                                for c in xrange(2,9):
                                    if colorA==c:
                                        stateFile.write(",1")
                                    else:
                                        stateFile.write(",0")
                                colorB = imageData[yB*xsize+xB]
                                for c in xrange(2,9):
                                    if colorB==c:
                                        stateFile.write(",1")
                                    else:
                                        stateFile.write(",0")
                                stateFile.write(","+str(a)+","+str(b)+")\n")
                                stateMapper[(xA,yA,xB,yB,a,b)] = stateNum
    # Add error state (collision, obstacle, or leaving the workspace).
    errorState = len(stateMapper)
    errorStateKey = (-1,-1,-1,-1,-1,-1)
    stateMapper[errorStateKey] = errorState
    stateFile.write(str(errorState)+":(-1,-1,-1,-1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)\n")
# ==================================
# Construct MDP --> Label file
# ==================================
# Label file: only the initial joint state (no freight carried) is "init".
with open(pngFileBasis+".lab","w") as labelFile:
    labelFile.write("0=\"init\" 1=\"deadlock\"\n")
    labelFile.write(str(stateMapper[(initXA,initYA,initXB,initYB,0,0)])+": 0\n")
# ==================================
# Construct MDP --> Transition file
# ==================================
# First, a function that computes the possible/likely
# transitions when going from a (x,y)-cell into some
# direction. It computes the image of the complete cell
# and then performs probability-weighting according to
# the areas of overlap
def computeSuccs(xpos,ypos,direction):
    """Successor distribution for one robot moving in ``direction``.

    Directions 0..3 are left/right/up/down; 4 means "stay put" and is
    deterministic. Otherwise the commanded direction gets probability
    1 - positionUpdateNoise and each other direction positionUpdateNoise/3;
    mass that would leave the grid or enter an obstacle (color 1) is
    collected in a single (-1, -1, errorProb) entry.
    """
    # Direction 4 encodes "no move": a single certain successor.
    if (direction==4):
        return [(xpos,ypos,1.0)]
    stepProbs = [positionUpdateNoise/3.0]*4
    stepProbs[direction] = 1.0-positionUpdateNoise
    # Offsets in the fixed order left, right, up, down (matching stepProbs).
    offsets = [(-1, 0), (1, 0), (0, -1), (0, 1)]
    succs = []
    errorProb = 0.0
    for prob, (dx, dy) in zip(stepProbs, offsets):
        nx = xpos + dx
        ny = ypos + dy
        if 0 <= nx < xsize and 0 <= ny < ysize and imageData[nx+ny*xsize]!=1:
            succs.append((nx, ny, prob))
        else:
            errorProb += prob
    if errorProb > 0.0:
        succs.append((-1,-1,errorProb))
    return succs
# Iterate over all cells and compute transition probabilities
# Joint actions 0..24 encode (dirA, dirB) as dirA*5+dirB; action 25 is the
# pick-up / drop action. Joint successors multiply the two robots' independent
# step distributions; collisions and obstacle hits feed the error state,
# except that carrying robots "colliding" simply stay in place.
transitionLines = []
overallNofTransitions = 0
for xA in xrange(0,xsize):
    for yA in xrange(0,ysize):
        for xB in xrange(0,xsize):
            for yB in xrange(0,ysize):
                if xA!=xB or yA!=yB:
                    if (imageData[xA+yA*xsize]!=1) and (imageData[xB+yB*xsize]!=1):
                        # Which current carry modes are possible for this combination?
                        carryModes = [0]
                        if xB==xA+2 and yA==yB:
                            carryModes.append(1)
                        # Normal motion.
                        for carryMode in carryModes:
                            sourceState = stateMapper[(xA,yA,xB,yB,carryMode,0)]
                            overallNofTransitions += 25
                            for dirA in [0,1,2,3,4]: # Action 4 is standing still
                                for dirB in [0,1,2,3,4]: # Action 4 is standing still
                                    succA = computeSuccs(xA,yA,dirA)
                                    succB = computeSuccs(xB,yB,dirB)
                                    errorProb = 0.0
                                    carryingSelfTransitionProb = 0.0
                                    thisAction = dirA*5+dirB
                                    for (destXA,destYA,probA) in succA:
                                        for (destXB,destYB,probB) in succB:
                                            # Carrying is lost if the robots leave the A..A+2 formation.
                                            if destXB!=destXA+2 or destYA!=destYB:
                                                destCarryMode = 0
                                            else:
                                                destCarryMode = carryMode
                                            if destXA==-1 or destXB==-1:
                                                errorProb += probA*probB
                                            elif destXA==destXB and destYA==destYB:
                                                if carryMode==1:
                                                    carryingSelfTransitionProb += probA*probB
                                                else:
                                                    errorProb += probA*probB
                                            elif (imageData[destXA+destYA*xsize]==1) or (imageData[destXB+destYB*xsize]==1):
                                                errorProb += probA*probB
                                            else:
                                                transitionLines.append([sourceState,thisAction,stateMapper[(destXA,destYA,destXB,destYB,destCarryMode,0)],probA*probB])
                                    if errorProb>0:
                                        transitionLines.append([sourceState,thisAction,errorState,errorProb])
                                    if carryingSelfTransitionProb>0:
                                        transitionLines.append([sourceState,thisAction,sourceState,carryingSelfTransitionProb])
                        # Picking up (action 25 on a color-2 cell between the robots).
                        if xB==xA+2 and yA==yB and (imageData[xA+1+yA*xsize]==2):
                            sourceState = stateMapper[(xA,yA,xB,yB,0,0)]
                            destState = stateMapper[(xA,yA,xB,yB,1,0)]
                            transitionLines.append([sourceState,25,destState,1.0])
                            overallNofTransitions += 1
                        # Dropping at the destination (action 25 on a color-3 cell).
                        if xB==xA+2 and yA==yB and (imageData[xA+1+yA*xsize]==3):
                            sourceState = stateMapper[(xA,yA,xB,yB,1,0)]
                            destState = stateMapper[(xA,yA,xB,yB,0,1)]
                            transitionLines.append([sourceState,25,destState,1.0])
                            # Recover after drop
                            sourceState = stateMapper[(xA,yA,xB,yB,0,1)]
                            destState = stateMapper[(xA,yA,xB,yB,0,0)]
                            transitionLines.append([sourceState,0,destState,1.0])
                            overallNofTransitions += 2
# Print transitions file: It contains the transitions computed earlier PLUS an error state self loop
with open(pngFileBasis+".tra","w") as transitionFile:
    transitionFile.write(str(len(stateMapper))+" "+str(overallNofTransitions+1)+" "+str(len(transitionLines)+1)+"\n")
    for (a,b,c,d) in transitionLines:
        transitionFile.write(str(a)+" "+str(b)+" "+str(c)+" "+str(d)+"\n")
    transitionFile.write(str(errorState)+" 0 "+str(errorState)+" 1.0\n")
# ==================================
# Compute and read strategy/policy
# ==================================
# Re-run the external ramps solver only when no strategy file exists yet or
# the parameters changed after it was written.
if not os.path.exists(pngFileBasis+".strategy") or (os.path.getmtime(pngFileBasis+".params")>os.path.getmtime(pngFileBasis+".strategy")):
    with open(pngFileBasis+".strategy","wb") as out:
        rampsProcess = subprocess.Popen(["../../src/ramps",pngFileBasis]+rampsParameters, bufsize=1048768, stdin=None, stdout=out)
        returncode = rampsProcess.wait()
        if (returncode!=0):
            print >>sys.stderr, "RAMPS returned error code:",returncode
            sys.exit(1)
# Parse the strategy: lines "s d mdpState choice" start a policy state;
# subsequent "-> dest s' d'" lines give the memory update per MDP successor.
policy = {}
currentPolicyState = None
with open(pngFileBasis+".strategy","r") as strat:
    nofPolicyStates = int(strat.readline().strip())
    while True:
        line = strat.readline()
        if line != '':
            if line.startswith("->"):
                line = line[2:].strip().split(" ")
                assert len(line)==3
                policy[currentPolicyState][2][int(line[0])] = (int(line[1]),int(line[2]))
            else:
                line = line.strip().split(" ")
                assert len(line)==4
                currentPolicyState = (int(line[0]),int(line[1]))
                policy[currentPolicyState] = [int(line[2]),int(line[3]),{}]
        else:
            break
# ==================================
# Prepare reverse state mapper and
# Searchable transition list
# ==================================
# Invert stateMapper so a numeric state can be decoded back to its tuple.
reverseStateMapper = {}
for (a,b) in stateMapper.iteritems():
    reverseStateMapper[b] = a
# Group transitions by (state, action) for fast sampling during simulation.
transitionLists = {}
for (a,b,c,d) in transitionLines:
    if not (a,b) in transitionLists:
        transitionLists[(a,b)] = [(c,d)]
    else:
        transitionLists[(a,b)].append((c,d))
# =========================================
# Initialize interactive display
# =========================================
pygame.init()
displayInfo = pygame.display.Info()
# Shrink the cell size so the whole workspace fits in 3/4 of the screen.
MAGNIFY = min(MAGNIFY,displayInfo.current_w*3/4/xsize)
MAGNIFY = min(MAGNIFY,displayInfo.current_h*3/4/ysize)
# ==================================
# Main loop
# ==================================
def actionLoop():
    """Interactive pygame visualization of the computed two-robot policy.

    Keys: q/ESC quit, SPACE pauses, r resets to the initial policy state,
    +/- change the simulation speed. Each frame draws the workspace, robot A
    (red), robot B (yellow), the freight between them while carried, then
    samples the next joint MDP state from the policy's chosen action.
    """
    screen = pygame.display.set_mode(((xsize+2)*MAGNIFY,(ysize+2)*MAGNIFY))
    pygame.display.set_caption('Policy Visualizer')
    clock = pygame.time.Clock()
    screenBuffer = pygame.Surface(screen.get_size())
    screenBuffer = screenBuffer.convert()
    screenBuffer.fill((64, 64, 64)) # Dark Gray
    # Initialize Policy
    policyState = None
    policyData = None
    isPaused = False
    speed = 10
    while 1:
        resetInThisRound = False
        # Process events
        for event in pygame.event.get():
            if event.type == pygame.locals.QUIT or (event.type == pygame.locals.KEYDOWN and event.key in [pygame.locals.K_ESCAPE,pygame.locals.K_q]):
                return
            if (event.type == pygame.locals.KEYDOWN and event.key == pygame.locals.K_SPACE):
                isPaused = not isPaused
            if (event.type == pygame.locals.KEYDOWN and event.key == pygame.locals.K_r):
                resetInThisRound = True
            if (event.type == pygame.locals.KEYDOWN and event.key == pygame.locals.K_PLUS):
                speed += 1
            if (event.type == pygame.locals.KEYDOWN and event.key == pygame.locals.K_MINUS):
                speed = max(speed-1,1)
        # Update: (re)start from policy state 0 on reset or first iteration.
        if resetInThisRound or (policyState==None):
            policyState = 0
            policyData = 0
        # Obtain robot information for drawing
        if (policyState,policyData) in policy:
            (robotXA,robotYA,robotXB,robotYB,carryA,carryB) = reverseStateMapper[policy[(policyState,policyData)][0]]
        else:
            (robotXA,robotYA,robotXB,robotYB,carryA,carryB) = (-1,-1,-1,-1,-1,-1) # Crashed
        # Draw Field
        for x in xrange(0,xsize):
            for y in xrange(0,ysize):
                paletteColor = imageData[y*xsize+x]
                color = palette[paletteColor*3:paletteColor*3+3]
                pygame.draw.rect(screenBuffer,color,((x+1)*MAGNIFY,(y+1)*MAGNIFY,MAGNIFY,MAGNIFY),0)
        # Draw boundary (red when the robots have crashed)
        if robotXA==-1:
            boundaryColor = (255,0,0)
        else:
            boundaryColor = (64,64,64)
        pygame.draw.rect(screenBuffer,boundaryColor,(0,0,MAGNIFY*(xsize+2),MAGNIFY),0)
        pygame.draw.rect(screenBuffer,boundaryColor,(0,MAGNIFY,MAGNIFY,MAGNIFY*(ysize+1)),0)
        pygame.draw.rect(screenBuffer,boundaryColor,(MAGNIFY*(xsize+1),MAGNIFY,MAGNIFY,MAGNIFY*(ysize+1)),0)
        pygame.draw.rect(screenBuffer,boundaryColor,(MAGNIFY,MAGNIFY*(ysize+1),MAGNIFY*xsize,MAGNIFY),0)
        # pygame.draw.rect(screenBuffer,boundaryColor,(0,0,MAGNIFY*(xsize+2),MAGNIFY),0)
        # Draw "Good" Robot
        if robotXA!=-1:
            pygame.draw.circle(screenBuffer, (192,32,32), ((robotXA+1)*MAGNIFY+MAGNIFY/2,(robotYA+1)*MAGNIFY+MAGNIFY/2) , MAGNIFY/3-2, 0)
            pygame.draw.circle(screenBuffer, (255,255,255), ((robotXA+1)*MAGNIFY+MAGNIFY/2,(robotYA+1)*MAGNIFY+MAGNIFY/2) , MAGNIFY/3-1, 1)
            pygame.draw.circle(screenBuffer, (0,0,0), ((robotXA+1)*MAGNIFY+MAGNIFY/2,(robotYA+1)*MAGNIFY+MAGNIFY/2) , MAGNIFY/3, 1)
            pygame.draw.circle(screenBuffer, (192,192,32), ((robotXB+1)*MAGNIFY+MAGNIFY/2,(robotYB+1)*MAGNIFY+MAGNIFY/2) , MAGNIFY/3-2, 0)
            pygame.draw.circle(screenBuffer, (255,255,255), ((robotXB+1)*MAGNIFY+MAGNIFY/2,(robotYB+1)*MAGNIFY+MAGNIFY/2) , MAGNIFY/3-1, 1)
            pygame.draw.circle(screenBuffer, (0,0,0), ((robotXB+1)*MAGNIFY+MAGNIFY/2,(robotYB+1)*MAGNIFY+MAGNIFY/2) , MAGNIFY/3, 1)
            # Freight: drawn in the cell between the robots while carried (cyan)
            # or after a successful drop (gray).
            if carryA==1:
                pygame.draw.circle(screenBuffer, (30,192,192), ((robotXA+2)*MAGNIFY+MAGNIFY/2,(robotYA+1)*MAGNIFY+MAGNIFY/2) , MAGNIFY/3-2, 0)
                pygame.draw.circle(screenBuffer, (255,255,255), ((robotXA+2)*MAGNIFY+MAGNIFY/2,(robotYA+1)*MAGNIFY+MAGNIFY/2) , MAGNIFY/3-1, 1)
                pygame.draw.circle(screenBuffer, (0,0,0), ((robotXA+2)*MAGNIFY+MAGNIFY/2,(robotYA+1)*MAGNIFY+MAGNIFY/2) , MAGNIFY/3, 1)
            elif carryB==1:
                pygame.draw.circle(screenBuffer, (200,200,200), ((robotXA+2)*MAGNIFY+MAGNIFY/2,(robotYA+1)*MAGNIFY+MAGNIFY/2) , MAGNIFY/3-2, 0)
                pygame.draw.circle(screenBuffer, (255,255,255), ((robotXA+2)*MAGNIFY+MAGNIFY/2,(robotYA+1)*MAGNIFY+MAGNIFY/2) , MAGNIFY/3-1, 1)
                pygame.draw.circle(screenBuffer, (0,0,0), ((robotXA+2)*MAGNIFY+MAGNIFY/2,(robotYA+1)*MAGNIFY+MAGNIFY/2) , MAGNIFY/3, 1)
        # Draw cell frames
        for x in xrange(0,xsize):
            for y in xrange(0,ysize):
                pygame.draw.rect(screenBuffer,(0,0,0),((x+1)*MAGNIFY,(y+1)*MAGNIFY,MAGNIFY,MAGNIFY),1)
        pygame.draw.rect(screenBuffer,(0,0,0),(MAGNIFY-1,MAGNIFY-1,MAGNIFY*xsize+2,MAGNIFY*ysize+2),1)
        # Flip!
        screen.blit(screenBuffer, (0, 0))
        pygame.display.flip()
        # Update state: sample the next MDP state from the cumulative
        # distribution of the policy's chosen (state, action) pair.
        if (not isPaused) and robotXA!=-1:
            randomNumber = random.random()
            (mdpstate,decision,dataUpdate) = policy[(policyState,policyData)]
            transitionList = transitionLists[(mdpstate,decision)]
            dest = None
            for (a,b) in transitionList:
                if randomNumber<=b:
                    dest = a
                    randomNumber = 123.0
                else:
                    randomNumber -= b
            # Rounding error? Fall back to the first successor.
            if (dest==None):
                dest = transitionList[0][0]
            # Update memory
            # print policyState
            # print decision
            # print dest
            # print policy[(policyState,policyData)]
            assert dest in policy[(policyState,policyData)][2]
            (policyState,policyData) = dataUpdate[dest]
            # print "MDP", mdpstate, "PS/Data:",policyState,",",policyData, "decision",decision,"dataupdate",dataUpdate
        # Make the transition
        if not isPaused:
            # Done
            clock.tick(speed)
        else:
            clock.tick(3)
# ==================================
# Call main program
# ==================================
actionLoop()
| 18,269 | 42.396675 | 181 | py |
pyRVtest | pyRVtest-main/setup.py | """Sets up the package."""
from pathlib import Path
from setuptools import find_packages, setup


def read(path: str) -> str:
    """Return the text of a file located next to this setup script.

    Replaces the original ``read = lambda p: ...`` (PEP 8 discourages assigning
    lambdas to names) and drops the redundant ``Path(Path(...))`` double wrap:
    the ``/`` operator already yields a ``Path``.
    """
    return (Path(__file__).resolve().parent / path).read_text()


# set up the package
setup(
    name='pyRVtest',
    author='Marco Duarte, Lorenzo Magnolfi, Mikkel Solvsten, Christopher Sullivan, Anya Tarascina',
    author_email='chris.sullivan.econ@gmail.com',
    url='https://github.com/anyatarascina/pyRVtest',
    packages=find_packages(),
    python_requires='>=3.7',
    install_requires=read('requirements.txt').splitlines(),
    extras_require={
        'docs': [
            'sphinx==5.0.2', 'pandas', 'ipython', 'astunparse', 'sphinx-rtd-theme==1.1.1',
            'nbsphinx==0.8.11', 'jinja2==3.0.3', 'docutils==0.17.1', 'numpydoc'
        ],
    },
    license='MIT',
    description='Code to perform econometric test of firm conduct',
    # the README between 'docs-start' markers becomes the long description
    long_description=read('README.rst').split('docs-start')[1].strip(),
    include_package_data=True,
    version='0.2.0'
)
| 1,038 | 32.516129 | 99 | py |
pyRVtest | pyRVtest-main/pyRVtest/version.py | """Current package version."""
__version__ = '0.2.0'
| 54 | 12.75 | 30 | py |
pyRVtest | pyRVtest-main/pyRVtest/primitives.py | """Primitive data structures that constitute the foundation of the BLP model."""
import abc
from typing import Any, Dict, Mapping, Optional, Sequence, Tuple, Union
import numpy as np
from pyblp.utilities.basics import Array, Data, Groups, RecArray, extract_matrix, structure_matrices
from pyblp.configurations.formulation import ColumnFormulation
from . import options
from . construction import build_ownership
from .configurations.formulation import Formulation, ModelFormulation
from .data import F_CRITICAL_VALUES_POWER_RHO, F_CRITICAL_VALUES_SIZE_RHO
class Products(object):
    r"""Product data structured as a record array.

    Attributes
    ----------
    market_ids : `ndarray`
        IDs that associate product_data with markets.
    cost_ids : `ndarray`
        IDs that associate product_data with cost-side fixed effects.
    nesting_ids : `ndarray`
        IDs that associate product_data with nesting groups.
    product_ids : `ndarray`
        IDs that identify product_data within markets.
    clustering_ids : `ndarray`
        IDs used to compute clustered standard errors.
    shares : `ndarray`
        Market shares, :math:`s`.
    prices : `ndarray`
        Product prices, :math:`p`.
    Z : `ndarray`
        Instruments, :math:`Z`.
    w : `ndarray`
        Cost-shifters, :math:`w`.

    """

    market_ids: Array
    cost_ids: Array
    nesting_ids: Array
    product_ids: Array
    clustering_ids: Array
    shares: Array
    prices: Array
    Z: Array
    w: Array

    def __new__(
            cls, cost_formulation: Formulation, instrument_formulation: Sequence[Optional[Formulation]],
            product_data: Mapping) -> RecArray:
        """Structure product data.

        Raises
        ------
        ValueError / TypeError / KeyError / NameError
            If the formulations or the fields of ``product_data`` fail validation.
        """

        # validate the cost formulation; the None check must come first because None is
        # not a Formulation instance, so the isinstance check would otherwise make the
        # ValueError branch unreachable
        if cost_formulation is None:
            raise ValueError("The formulation for marginal cost must be specified.")
        if not isinstance(cost_formulation, Formulation):
            raise TypeError("cost_formulation must be a Formulation instance.")

        # build w
        w, w_formulation, w_data = cost_formulation._build_matrix(product_data)
        if 'shares' in w_data:
            raise NameError("shares cannot be included in the formulation for marginal cost.")

        # check that prices are not in X1
        if 'prices' in w_data:
            raise NameError("prices cannot be included in the formulation for marginal cost.")

        # validate the instrument formulation (a sequence of formulations gives one
        # instrument block per model pair to be tested)
        if instrument_formulation is None:
            raise ValueError("The formulation for instruments for testing must be specified.")
        if hasattr(instrument_formulation, '__len__'):
            L = len(instrument_formulation)
            if not all(isinstance(f, Formulation) for f in instrument_formulation):
                raise TypeError("Each formulation in instrument_formulation must be a Formulation.")
        else:
            L = 1
            if not isinstance(instrument_formulation, Formulation):
                raise TypeError("instrument_formulation must be a single Formulation instance.")

        # build Z: store the matrix, the column formulations, and the underlying data
        # for each instrument set under keys "Z0", "Z1", ...
        Z: Data = {}
        if L == 1:
            Z_l, Z_formulation_l, Z_data_l = instrument_formulation._build_matrix(product_data)
            for z in Z_formulation_l:
                if z in w_data:
                    raise NameError("Z must be excluded from marginal cost.")
            Z["Z0"] = Z_l
            Z["Z0_formulation"] = Z_formulation_l
            Z["Z0_data"] = Z_data_l
        elif L > 1:
            for l in range(L):
                Z_l, Z_formulation_l, Z_data_l = instrument_formulation[l]._build_matrix(product_data)
                for z in Z_formulation_l:
                    if z in w_data:
                        raise NameError("Z must be excluded from marginal cost.")
                Z["Z{0}".format(l)] = Z_l
                Z["Z{0}_formulation".format(l)] = Z_formulation_l
                Z["Z{0}_data".format(l)] = Z_data_l

        # load fixed effect IDs
        cost_ids = None
        if cost_formulation._absorbed_terms:
            cost_ids = cost_formulation._build_ids(product_data)

        # load other IDs
        market_ids = extract_matrix(product_data, 'market_ids')
        nesting_ids = extract_matrix(product_data, 'nesting_ids')
        product_ids = extract_matrix(product_data, 'product_ids')
        clustering_ids = extract_matrix(product_data, 'clustering_ids')
        if market_ids is None:
            raise KeyError("product_data must have a market_ids field.")
        if market_ids.shape[1] > 1:
            raise ValueError("The market_ids field of product_data must be one-dimensional.")
        if nesting_ids is not None and nesting_ids.shape[1] > 1:
            raise ValueError("The nesting_ids field of product_data must be one-dimensional.")
        if product_ids is not None and product_ids.shape[1] > 1:
            raise ValueError("The product_ids field of product_data must be one-dimensional.")
        if clustering_ids is not None:
            if clustering_ids.shape[1] > 1:
                raise ValueError("The clustering_ids field of product_data must be one-dimensional.")
            if np.unique(clustering_ids).size == 1:
                raise ValueError("The clustering_ids field of product_data must have at least two distinct IDs.")

        # load shares
        shares = extract_matrix(product_data, 'shares')
        if shares is None:
            raise KeyError("product_data must have a shares field.")
        if shares.shape[1] > 1:
            raise ValueError("The shares field of product_data must be one-dimensional.")
        if (shares <= 0).any() or (shares >= 1).any():
            raise ValueError(
                "The shares field of product_data must consist of values between zero and one, exclusive.")

        # verify that shares sum to less than one in each market
        market_groups = Groups(market_ids)
        bad_shares_index = market_groups.sum(shares) >= 1
        if np.any(bad_shares_index):
            bad_market_ids = market_groups.unique[bad_shares_index.flat]
            raise ValueError(f"Shares in these markets do not sum to less than 1: {bad_market_ids}.")

        # load prices
        prices = extract_matrix(product_data, 'prices')
        if prices is None:
            raise KeyError("product_data must have a prices field.")
        if prices.shape[1] > 1:
            raise ValueError("The prices field of product_data must be one-dimensional.")
        if (prices < 0).any():
            # fixed message: the original "values >= zero, exclusive" was contradictory
            raise ValueError("The prices field of product_data must consist of nonnegative values.")

        # structure product fields as a mapping
        product_mapping: Dict[Union[str, tuple], Tuple[Optional[Array], Any]] = {}
        product_mapping.update({
            'market_ids': (market_ids, np.object_),
            'cost_ids': (cost_ids, np.object_),
            'nesting_ids': (nesting_ids, np.object_),
            'product_ids': (product_ids, np.object_),
            'clustering_ids': (clustering_ids, np.object_),
            'shares': (shares, options.dtype),
            'prices': (prices, options.dtype)
        })
        product_mapping.update({(tuple(w_formulation), 'w'): (w, options.dtype), })
        for l in range(L):
            key = (tuple(Z["Z{0}_formulation".format(l)]), 'Z{0}'.format(l))
            product_mapping.update({key: (Z["Z{0}".format(l)], options.dtype)})

        # structure and validate variables underlying instruments
        underlying_data = {k: (v, options.dtype) for k, v in {**w_data}.items() if k != 'shares'}
        for l in range(L):
            underlying_data.update({
                k: (v, options.dtype) for k, v in {**Z["Z{0}_data".format(l)]}.items() if k != 'shares'
            })
        invalid_names = set(underlying_data) & {k if isinstance(k, str) else k[1] for k in product_mapping}
        if invalid_names:
            raise NameError(f"These reserved names in product_formulations are invalid: {list(invalid_names)}.")
        return structure_matrices({**product_mapping, **underlying_data})
class Models(object):
    r"""Models data structured as a dictionary.

    Attributes
    ----------
    models_downstream: `str`
        Model of conduct for downstream firms. This is used to construct downstream markups. This must be one of the
        allowed models.
    models_upstream: `str, optional`
        Model of conduct for upstream firms. This is used to construct upstream markups. This must be one of the
        allowed models.
    firm_ids_downstream: `ndarray`
        Vector of firm ids used to construct ownership for downstream firms.
    firm_ids_upstream: `ndarray`
        Vector of firm ids used to construct ownership for upstream firms.
    ownership_matrices_downstream: `ndarray`
        Matrix of ownership relationships between downstream firms.
    ownership_matrices_upstream: `ndarray`
        Matrix of ownership relationships between upstream firms.
    vertical_integration: `ndarray, optional`
        Vector indicating which product_ids are vertically integrated (i.e. store brands).
    vertical_integration_index: `ndarray, optional`
        Indicates the index for a particular vertical relationship (which model it corresponds to).
    unit_tax: `ndarray, optional`
        A vector containing information on unit taxes.
    unit_tax_name: `str, optional`
        The column name for the column containing unit taxes.
    advalorem_tax: `ndarray, optional`
        A vector containing information on advalorem taxes.
    advalorem_tax_name: `str, optional`
        The column name for the column containing advalorem taxes.
    advalorem_payer: `str, optional`
        If there are advalorem taxes in the model, this specifies who the payer of these taxes are. It can be either the
        consumer or the firm.
    cost_scaling: `ndarray, optional`
        The cost scaling parameter.
    cost_scaling_column: `str, optional`
        The name of the column containing the cost scaling parameter.
    custom_model: `dict, optional`
        A custom formula used to compute markups, optionally specified by the user.
    user_supplied_markups: `ndarray, optional`
        A vector of user-computed markups.
    user_supplied_markups_name: `str, optional`
        The name of the column containing user-supplied markups.

    """

    models_downstream: Array
    models_upstream: Array
    firm_ids_downstream: Array
    firm_ids_upstream: Array
    ownership_matrices_downstream: Array
    ownership_matrices_upstream: Array
    vertical_integration: Array
    vertical_integration_index: Array
    custom_model: Array
    unit_tax: Array
    unit_tax_name: Array
    advalorem_tax: Array
    advalorem_tax_name: Array
    advalorem_payer: Array
    cost_scaling: Array
    cost_scaling_column: Array
    user_supplied_markups: Array
    user_supplied_markups_name: Array

    def __new__(
            cls, model_formulations: Sequence[Optional[ModelFormulation]], product_data: Mapping) -> Dict[str, list]:
        """Structure model data. Data structures may be empty.

        NOTE(review): despite the class docstring's record-array framing, this returns
        a plain dict mapping field names to length-M lists (one entry per model).
        """

        # validate the model formulations
        # NOTE(review): None entries pass this check but would raise AttributeError at
        # _build_matrix below — confirm whether None formulations are actually supported
        if not all(isinstance(f, ModelFormulation) or f is None for f in model_formulations):
            raise TypeError("Each formulation in model_formulations must be a ModelFormulation instance or None.")
        M = len(model_formulations)
        if M < 2:
            raise ValueError("At least two model formulations must be specified.")
        N = product_data.shape[0]  # number of product-market observations

        # initialize model components (one slot per model formulation)
        models_downstream = [None] * M
        models_upstream = [None] * M
        firm_ids_downstream = [None] * M
        firm_ids_upstream = [None] * M
        ownership_matrices_downstream = [None] * M
        ownership_matrices_upstream = [None] * M
        vertical_integration = [None] * M
        vertical_integration_index = [None] * M
        custom_model = [None] * M
        unit_tax = [None] * M
        unit_tax_name = [None] * M
        advalorem_tax = [None] * M
        advalorem_tax_name = [None] * M
        advalorem_payer = [None] * M
        cost_scaling = [None] * M
        cost_scaling_column = [None] * M
        user_supplied_markups = [None] * M
        user_supplied_markups_name = [None] * M

        # extract data for each model
        for m in range(M):
            model = model_formulations[m]._build_matrix(product_data)
            models_downstream[m] = model['model_downstream']
            if model['model_upstream'] is not None:
                models_upstream[m] = model['model_upstream']

            # define ownership matrices for downstream model
            # NOTE(review): model['firm_ids'] is overwritten again below for the
            # upstream case; the assignment appears to feed build_ownership only
            model['firm_ids'] = model['ownership_downstream']
            if model['model_downstream'] == 'monopoly':
                ownership_matrices_downstream[m] = build_ownership(
                    product_data, model['ownership_downstream'], 'monopoly'
                )
                firm_ids_downstream[m] = 'monopoly'
            else:
                ownership_matrices_downstream[m] = build_ownership(
                    product_data, model['ownership_downstream'], model['kappa_specification_downstream']
                )
                firm_ids_downstream[m] = model['ownership_downstream']

            # define ownership matrices for upstream model
            model['firm_ids'] = model['ownership_upstream']
            if model['model_upstream'] == 'monopoly':
                ownership_matrices_upstream[m] = build_ownership(product_data, model['ownership_upstream'], 'monopoly')
                firm_ids_upstream[m] = 'monopoly'
            elif model['ownership_upstream'] is not None:
                ownership_matrices_upstream[m] = build_ownership(
                    product_data, model['ownership_upstream'], model['kappa_specification_upstream']
                )
                firm_ids_upstream[m] = model['ownership_upstream']

            # define vertical integration related variables
            if model["vertical_integration"] is not None:
                vertical_integration[m] = extract_matrix(product_data, model["vertical_integration"])
                vertical_integration_index[m] = model["vertical_integration"]

            # define unit tax (zeros when the model specifies no unit tax)
            if model['unit_tax'] is not None:
                unit_tax[m] = extract_matrix(product_data, model['unit_tax'])
                unit_tax_name[m] = model['unit_tax']
            elif model['unit_tax'] is None:
                unit_tax[m] = np.zeros((N, 1))

            # define ad valorem tax (zeros when the model specifies no ad valorem tax)
            if model['advalorem_tax'] is not None:
                advalorem_tax[m] = extract_matrix(product_data, model['advalorem_tax'])
                advalorem_tax_name[m] = model['advalorem_tax']
                advalorem_payer[m] = model['advalorem_payer']
                # normalize plural spellings to the singular forms used downstream
                advalorem_payer[m] = advalorem_payer[m].replace('consumers', 'consumer').replace('firms', 'firm')
            elif model['advalorem_tax'] is None:
                advalorem_tax[m] = np.zeros((N, 1))

            # define cost scaling (zeros when the model specifies no cost scaling)
            if model['cost_scaling'] is not None:
                cost_scaling_column[m] = model['cost_scaling']
                cost_scaling[m] = extract_matrix(product_data, model['cost_scaling'])
            elif model['cost_scaling'] is None:
                cost_scaling[m] = np.zeros((N, 1))

            # define custom markup model or user supplied markups
            custom_model[m] = model['custom_model_specification']
            if model["user_supplied_markups"] is not None:
                user_supplied_markups[m] = extract_matrix(product_data, model["user_supplied_markups"])
                user_supplied_markups_name[m] = model["user_supplied_markups"]

        # structure product fields as a mapping
        models_mapping: Dict[str, list] = {}
        models_mapping.update({
            'models_downstream': models_downstream,
            'models_upstream': models_upstream,
            'firm_ids_downstream': firm_ids_downstream,
            'firm_ids_upstream': firm_ids_upstream,
            'ownership_downstream': ownership_matrices_downstream,
            'ownership_upstream': ownership_matrices_upstream,
            'vertical_integration': vertical_integration,
            'vertical_integration_index': vertical_integration_index,
            'unit_tax': unit_tax,
            'unit_tax_name': unit_tax_name,
            'advalorem_tax': advalorem_tax,
            'advalorem_tax_name': advalorem_tax_name,
            'advalorem_payer': advalorem_payer,
            'cost_scaling_column': cost_scaling_column,
            'cost_scaling': cost_scaling,
            'custom_model_specification': custom_model,
            'user_supplied_markups': user_supplied_markups,
            'user_supplied_markups_name': user_supplied_markups_name
        })
        return models_mapping
class Container(abc.ABC):
    """An abstract container for structured product and instruments data."""

    products: RecArray
    models: RecArray
    _w_formulation: Tuple[ColumnFormulation, ...]
    _Z_formulation: Tuple[ColumnFormulation, ...]
    # kept for backward compatibility with class-level access; instances get their own
    # dict in __init__ (the original mutated this single class-level dict from every
    # instance, so all instances shared — and clobbered — the same mapping)
    Dict_Z_formulation: Dict[Union[str, tuple], Tuple[Optional[Array], Any]] = {}

    @abc.abstractmethod
    def __init__(self, products: RecArray, models: RecArray) -> None:
        """Store data and column formulations."""
        self.products = products
        self.models = models
        # the third element of a dtype field entry is its title, which holds the
        # column formulations attached by structure_matrices
        self._w_formulation = self.products.dtype.fields['w'][2]

        # collect one formulation tuple per instrument set Z0, Z1, ... into a fresh
        # per-instance dict
        self.Dict_Z_formulation = {}
        i = 0
        while 'Z{0}'.format(i) in self.products.dtype.fields:
            self._Z_formulation = self.products.dtype.fields['Z{0}'.format(i)][2]
            self.Dict_Z_formulation.update({"_Z{0}_formulation".format(i): self._Z_formulation})
            i += 1
def read_critical_values_tables():
    """Load the critical-value tables for size and power from the packaged csv files.

    These tables are used to evaluate the strength of the instruments. Returns the
    power table first, then the size table, each as a numpy structured array.
    """
    # each csv has one header row followed by comma-separated numeric columns
    def _load_table(path, column_spec):
        return np.genfromtxt(path, delimiter=',', skip_header=1, dtype=column_spec)

    size_columns = [('K', 'i4'), ('rho', 'f8'), ('r_075', 'f8'), ('r_10', 'f8'), ('r_125', 'f8')]
    power_columns = [('K', 'i4'), ('rho', 'f8'), ('r_50', 'f8'), ('r_75', 'f8'), ('r_95', 'f8')]

    critical_values_size = _load_table(F_CRITICAL_VALUES_SIZE_RHO, size_columns)
    critical_values_power = _load_table(F_CRITICAL_VALUES_POWER_RHO, power_columns)
    return critical_values_power, critical_values_size
| 18,680 | 44.014458 | 120 | py |
pyRVtest | pyRVtest-main/pyRVtest/options.py | r"""Global options.
Attributes
----------
digits : `int`
Number of digits displayed by status updates. The default number of digits is ``7``. The number of digits can be
changed to, for example, ``2``, with ``pyblp.options.digits = 2``.
verbose : `bool`
Whether to output status updates. By default, verbosity is turned on. Verbosity can be turned off with
``pyblp.options.verbose = False``.
verbose_tracebacks : `bool`
Whether to include full tracebacks in error messages. By default, full tracebacks are turned off. These can be
useful when attempting to find the source of an error message. Tracebacks can be turned on with
``pyblp.options.verbose_tracebacks = True``.
verbose_output : `callable`
Function used to output status updates. The default function is simply ``print``. The function can be changed, for
example, to include an indicator that statuses are from this package, with
``pyblp.verbose_output = lambda x: print(f"pyblp: {x}")``.
flush_output : `bool`
Whether to call ``sys.stdout.flush()`` after outputting a status update. By default, output is not flushed to
standard output. To force standard output flushes after every status update, set
``pyblp.options.flush_output = True``. This may be particularly desirable for R users who are calling PyBLP from
`reticulate <https://github.com/rstudio/reticulate>`_, since standard output is typically not automatically flushed
to the screen in this environment. If PyBLP is imported as ``pyblp``, this setting can be enabled in R with
``pyblp$options$flush_output <- TRUE``.
dtype : `dtype`
The data type used for internal calculations, which is by default ``numpy.float64``. The other recommended option is
``numpy.longdouble``, which is the only extended precision floating point type currently supported by NumPy.
Although this data type will be used internally, ``numpy.float64`` will be used when passing arrays to optimization
and fixed point routines, which may not support extended precision. The library underlying :mod:`scipy.linalg`,
which is used for matrix inversion, may also use ``numpy.float64``.
One instance in which extended precision can be helpful in the BLP problem is when there are a large number of near
zero choice probabilities with small integration weights, which, under standard precision are called zeros when in
aggregate they are nonzero. For example, :ref:`references: Skrainka (2012)` finds that using long doubles is
sufficient to solve many utility floating point problems.
The precision of ``numpy.longdouble`` depends on the platform on which NumPy is installed. If the platform in use
does not support extended precision, using ``numpy.longdouble`` may lead to unreliable results. For example, on
Windows, NumPy is usually compiled such that ``numpy.longdouble`` often behaves like ``numpy.float64``. Precisions
can be compared with :class:`numpy.finfo` by running ``numpy.finfo(numpy.float64)`` and
``numpy.finfo(numpy.longdouble)``. For more information, refer to
`this discussion <https://docs.scipy.org/doc/numpy-1.13.0/user/basics.types.html#extended-precision>`_.
If extended precisions is supported, the data type can be switched with ``pyblp.options.dtype = numpy.longdouble``.
On Windows, it is often easier to install Linux in a virtual machine than it is to build NumPy from source with a
non-standard compiler.
finite_differences_epsilon : `float`
Perturbation :math:`\epsilon` used to numerically approximate derivatives with central finite differences:
.. math:: f'(x) = \frac{f(x + \epsilon / 2) - f(x - \epsilon / 2)}{\epsilon}.
By default, this is the square root of the machine epsilon: ``numpy.sqrt(numpy.finfo(options.dtype).eps)``. The
typical example where this is used is when computing the Hessian, but it may also be used to compute Jacobians
required for standard errors when analytic gradients are disabled.
pseudo_inverses : `bool`
Whether to compute Moore-Penrose pseudo-inverses of matrices with :func:`scipy.linalg.pinv` instead of their classic
inverses with :func:`scipy.linalg.inv`. This is by default ``True``, so pseudo-inverses will be used. Up to small
numerical differences, the pseudo-inverse is identical to the classic inverse for invertible matrices. Using the
pseudo-inverse by default can help alleviate problems from, for example, near-singular weighting matrices.
To always attempt to compute classic inverses first, set ``pyblp.options.pseudo_inverses = False``. If a classic
inverse cannot be computed, an error will be displayed, and a pseudo-inverse may be computed instead.
collinear_atol : `float`
Absolute tolerance for detecting collinear columns in each matrix of product characteristics and instruments:
:math:`X_1`, :math:`X_2`, :math:`X_3`, :math:`Z_D`, and :math:`Z_S`.
Each matrix is decomposed into a :math:`QR` decomposition and an error is raised for any column whose diagonal
element in :math:`R` has a magnitude less than ``collinear_atol + collinear_rtol * sd`` where ``sd`` is the column's
standard deviation.
The default absolute tolerance is ``1e-14``. To disable collinearity checks, set
``pyblp.options.collinear_atol = pyblp.options.collinear_rtol = 0``.
collinear_rtol : `float`
Relative tolerance for detecting collinear columns, which is by default also ``1e-14``.
psd_atol : `float`
Absolute tolerance for detecting non-positive semidefinite matrices. For example, this check is applied to any
custom weighting matrix, :math:`W`.
Singular value decomposition factorizes the matrix into :math:`U \Sigma V` and an error is raised if any element in
the original matrix differs in absolute value from :math:`V' \Sigma V` by more than ``psd_atol + psd_rtol * abs``
where ``abs`` is the element's absolute value.
The default tolerance is ``1e-8``. To disable positive semidefinite checks, set
``pyblp.options.psd_atol = pyblp.options.psd_rtol = numpy.inf``.
psd_rtol : `float`
Relative tolerance for detecting non-positive definite matrices, which is by default also ``1e-8``.
"""
# underscore alias keeps numpy out of this options module's public namespace
import numpy as _np

# number of digits displayed by status updates
digits = 7
# whether to output status updates
verbose = True
# whether to include full tracebacks in error messages
verbose_tracebacks = False
# function used to output status updates
verbose_output = print
# whether to flush standard output after each status update
flush_output = False
# data type used for internal calculations
dtype = _np.float64
# perturbation used for central finite-difference derivative approximations
finite_differences_epsilon = _np.sqrt(_np.finfo(dtype).eps)
# whether to use Moore-Penrose pseudo-inverses instead of classic inverses
pseudo_inverses = True
# absolute/relative tolerances for detecting collinear columns
collinear_atol = collinear_rtol = 1e-14
# absolute/relative tolerances for detecting non-positive semidefinite matrices
psd_atol = psd_rtol = 1e-8
# number of random draws (NOTE(review): consumer not shown in this module — confirm)
ndraws = 99999
# seed for random draws (NOTE(review): presumably fed to an RNG elsewhere — confirm)
random_seed = 1
| 6,539 | 57.392857 | 120 | py |
pyRVtest | pyRVtest-main/pyRVtest/construction.py | """Data construction."""
import contextlib
import os
from pathlib import Path
import pickle
from typing import Any, Callable, Mapping, Optional, Union
import numpy as np
from numpy.linalg import inv
from pyblp.utilities.basics import Array, RecArray, extract_matrix, get_indices
from . import options
def build_ownership(
        product_data: Mapping, firm_ids_column_name: str,
        kappa_specification: Optional[Union[str, Callable[[Any, Any], float]]] = None) -> Array:
    r"""Build ownership matrices, :math:`O`.

    Ownership or product holding matrices are defined by their cooperation matrix counterparts, :math:`\kappa`. For each
    market :math:`t`, :math:`\mathscr{H}_{jk} = \kappa_{fg}` where :math:`j \in J_{ft}`, the set of products
    produced by firm :math:`f` in the market, and similarly, :math:`g \in J_{gt}`.

    .. note::
        This function is a copy of the function from PyBLP, with a slight change. In order to allow upstream and
        downstream firms to have different ownership structures, the user can pass in the names of the columns
        corresponding to firm ids for downstream and upstream firms.

    Parameters
    ----------
    product_data : `structured array-like`
        Each row corresponds to a product. Markets can have differing numbers of products. The following fields are
        required (except for ``firm_ids`` when ``kappa_specification`` is one of the special cases):

            - **market_ids** : (`object`) - IDs that associate products with markets.

    firm_ids_column_name: column in product_data with firm ids that associate products with firms. This field is ignored
        if ``kappa_specification`` is one of the special cases and not a function.
    kappa_specification : `str or callable, optional`
        Specification for each market's cooperation matrix, :math:`\kappa`, which can either be a general function or a
        string that implements a special case. The general function is is of the following form::

            kappa(f, g) -> value

        where ``value`` is :math:`\mathscr{H}_{jk}` and both ``f`` and ``g`` are firm IDs from the ``firm_ids`` field of
        ``product_data``.

        The default specification constructs traditional ownership matrices. That is,
        :math:`\kappa = I`, the identity matrix, implies that :math:`\mathscr{H}_{jk}` is :math:`1` if the same firm
        produces products :math:`j` and :math:`k`, and is :math:`0` otherwise.

        If ``firm_ids`` happen to be indices for an actual :math:`\kappa` matrix, ``lambda f, g: kappa[f, g]`` will
        build ownership matrices according to the matrix ``kappa``.

        When one of the special cases is specified, ``firm_ids`` in ``product_data`` are not required and if specified
        will be ignored:

            - ``'monopoly'`` - Monopoly ownership matrices are all ones: :math:`\mathscr{H}_{jk} = 1` for all :math:`j`
              and :math:`k`.

            - ``'single'`` - Single product firm ownership matrices are identity matrices: :math:`\mathscr{H}_{jk} = 1`
              if :math:`j = k` and :math:`0` otherwise.

    Returns
    -------
    `ndarray`
        Stacked :math:`J_t \times J_t` ownership matrices, :math:`\mathscr{H}`, for each market :math:`t`. If a market
        has fewer products than others, extra columns will contain ``numpy.nan``.

    """

    # validate or use the default kappa specification; a named function replaces the
    # original assigned lambda (PEP 8 E731) — same vectorized identity-kappa behavior
    if kappa_specification is None:
        def kappa_specification(f, g):
            return np.where(f == g, 1, 0).astype(options.dtype)
    elif callable(kappa_specification):
        kappa_specification = np.vectorize(kappa_specification, [options.dtype])
    elif kappa_specification not in {'monopoly', 'single'}:
        raise ValueError("kappa_specification must be None, callable, 'monopoly', or 'single'.")

    # extract and validate IDs
    market_ids = extract_matrix(product_data, 'market_ids')
    firm_ids = extract_matrix(product_data, firm_ids_column_name)
    if market_ids is None:
        raise KeyError("product_data must have a market_ids field.")
    if market_ids.shape[1] > 1:
        raise ValueError("The market_ids field of product_data must be one-dimensional.")

    # firm IDs are only needed for a callable (non-special-case) specification
    if callable(kappa_specification):
        if firm_ids is None:
            raise KeyError(
                "product_data must have a firm_ids field or firm_ids_column_name must be specified when "
                "kappa_specification is not a special case."
            )
        if firm_ids.shape[1] > 1:
            raise ValueError("The firm_ids field of product_data must be one-dimensional.")

    # determine the overall number of products and the maximum number in a market
    market_indices = get_indices(market_ids)
    N = market_ids.size
    max_J = max(i.size for i in market_indices.values())

    # construct the ownership matrices; NaN-pad markets with fewer than max_J products
    ownership = np.full((N, max_J), np.nan, options.dtype)
    for indices_t in market_indices.values():
        if kappa_specification == 'monopoly':
            ownership[indices_t, :indices_t.size] = 1
        elif kappa_specification == 'single':
            ownership[indices_t, :indices_t.size] = np.eye(indices_t.size)
        else:
            assert callable(kappa_specification) and firm_ids is not None
            ids_t = firm_ids[indices_t]
            tiled_ids_t = np.tile(np.c_[ids_t], ids_t.size)
            ownership[indices_t, :indices_t.size] = kappa_specification(tiled_ids_t, tiled_ids_t.T)
    return ownership
def build_markups(
        product_data: RecArray, pyblp_results: Mapping, model_downstream: Optional[Array],
        ownership_downstream: Optional[Array], model_upstream: Optional[Array] = None,
        ownership_upstream: Optional[Array] = None, vertical_integration: Optional[Array] = None,
        custom_model_specification: Optional[dict] = None, user_supplied_markups: Optional[Array] = None) -> Array:
    r"""This function computes markups for a large set of standard models.

    The models that this package is able to compute markups for include:

        - standard bertrand with ownership matrix based on firm id
        - price setting with arbitrary ownership matrix (e.g. profit weight model)
        - standard cournot with ownership matrix based on firm id
        - quantity setting with arbitrary ownership matrix (e.g. profit weight model)
        - monopoly
        - bilateral oligopoly with any combination of the above models upstream and downstream
        - bilateral oligopoly as above but with subset of products vertically integrated
        - any of the above with consumer surplus weights

    In order to compute markups, the products data and PyBLP demand estimation results must be specified, as well as at
    least a model of downstream conduct. If `model_upstream` is not specified, this is a model without vertical
    integration.

    Parameters
    ----------
    product_data : `recarray`
        The `product_data` containing information on markets and product characteristics. This should be the same as
        the data used for demand estimation. To compute markups, this data must include `prices`, `market_ids`, and
        `shares`.
    pyblp_results : `structured array-like`
        The results object obtained from using the pyBLP demand estimation procedure. We use built-in PyBLP
        functions to return the demand Jacobians and Hessians (first and second derivatives of shares with respect
        to prices).
    model_downstream: `ndarray`
        The model of conduct for downstream firms. Can be one of [`bertrand`, `cournot`, `monopoly`,
        `perfect_competition`, `other`]. Only specify option `other` if supplying a custom markup formula.
    ownership_downstream: `ndarray`
        The ownership matrix for price or quantity setting (optional, default is standard ownership).
    model_upstream: `ndarray, optional`
        Upstream firm model of conduct. Only specify option `other` if supplying a custom markup formula. Can be one
        of ['none' (default), `bertrand`, `cournot`, `monopoly`, `perfect_competition`, `other`].
    ownership_upstream: `ndarray, optional`
        Ownership matrix for price or quantity setting of upstream firms (optional, default is None).
    vertical_integration: `ndarray, optional`
        Vector indicating which `product_ids` are vertically integrated (ie store brands) (optional, default is
        None).
    custom_model_specification: `dict, optional`
        Dictionary containing a custom markup formula and the name of the formula (optional, default is None).
    user_supplied_markups: `ndarray, optional`
        Vector containing user-computed markups (optional, default is None). If user supplied own markups, this
        function simply returns them.

    Returns
    -------
    `tuple[list, list, list]`
        . Computed markups, downstream markups, and upstream markups for each model.

    Notes
    _____
    For models without vertical integration, firm_ids must be defined in product_data for vi models, and
    firm_ids_upstream and firm_ids (=firm_ids_downstream) must be defined.
    """

    # initialize market characteristics
    N = product_data.shape[0]
    number_models = len(model_downstream)
    markets = np.unique(product_data.market_ids)

    # initialize markups
    markups = [None] * number_models
    markups_upstream = [None] * number_models
    markups_downstream = [None] * number_models
    for i in range(number_models):
        markups_downstream[i] = np.zeros((N, 1), dtype=options.dtype)
        markups_upstream[i] = np.zeros((N, 1), dtype=options.dtype)

    # precompute demand jacobians, silencing pyblp output; the null-device handle is
    # now opened in a managed context so it is closed instead of leaked
    with open(os.devnull, 'w') as devnull, contextlib.redirect_stdout(devnull):
        ds_dp = pyblp_results.compute_demand_jacobians()

    # compute markups market-by-market
    for i in range(number_models):
        if user_supplied_markups[i] is not None:
            # user-supplied markups short-circuit all computation for this model
            markups[i] = user_supplied_markups[i]
            markups_downstream[i] = user_supplied_markups[i]
        else:
            for t in markets:
                index_t = np.where(pyblp_results.problem.products['market_ids'] == t)[0]
                shares_t = product_data.shares[index_t]
                retailer_response_matrix = ds_dp[index_t]
                # drop NaN-padded columns for markets with fewer products
                retailer_response_matrix = retailer_response_matrix[:, ~np.isnan(retailer_response_matrix).all(axis=0)]

                # compute downstream markups for model i market t
                markups_downstream[i], retailer_ownership_matrix = evaluate_first_order_conditions(
                    index_t, model_downstream[i], ownership_downstream[i], retailer_response_matrix, shares_t,
                    markups_downstream[i], custom_model_specification[i], markup_type='downstream'
                )

                # compute upstream markups (if applicable) following formula in Villas-Boas (2007)
                if model_upstream[i] is not None:
                    # construct the matrix of derivatives with respect to prices for other manufacturers
                    markups_t = markups_downstream[i][index_t]
                    passthrough_matrix = construct_passthrough_matrix(
                        pyblp_results, t, retailer_response_matrix, retailer_ownership_matrix, markups_t
                    )

                    # solve for matrix of cross-price elasticities of derived demand and the effects of cost
                    # pass-through
                    manufacturer_response_matrix = np.transpose(passthrough_matrix) @ retailer_response_matrix

                    # compute upstream markups
                    markups_upstream[i], manufacturer_ownership_matrix = evaluate_first_order_conditions(
                        index_t, model_upstream[i], ownership_upstream[i], manufacturer_response_matrix, shares_t,
                        markups_upstream[i], custom_model_specification[i], markup_type='upstream'
                    )

    # compute total markups as sum of upstream and downstream markups, taking into account vertical integration
    for i in range(number_models):
        if user_supplied_markups[i] is None:
            if vertical_integration[i] is None:
                vi = np.ones((N, 1))
            else:
                # flip the 0/1 indicator: vertically integrated products get no upstream markup
                vi = (vertical_integration[i] - 1) ** 2
            markups[i] = markups_downstream[i] + vi * markups_upstream[i]
    return markups, markups_downstream, markups_upstream
def construct_passthrough_matrix(
        pyblp_results, market_id, retailer_response_matrix, retailer_ownership_matrix, markups_t):
    """Construct the passthrough matrix using the formula from Villas-Boas (2007). This matrix contains the derivatives
    of all retail prices with respect to all wholesale prices.

    Parameters
    ----------
    pyblp_results : pyblp results object used to compute demand hessians for the market.
    market_id : identifier of the market for which passthrough is computed.
    retailer_response_matrix : matrix of demand derivatives with respect to retail prices for this market.
    retailer_ownership_matrix : retailer ownership matrix for this market.
    markups_t : vector of downstream markups for this market.

    Returns
    -------
    `ndarray`
        The passthrough matrix inv(G) @ H of derivatives of retail prices with respect to wholesale prices.
    """
    # compute demand hessians, suppressing pyblp's console output; open os.devnull in a with block
    # so the file handle is closed instead of leaked
    with open(os.devnull, 'w') as null_out:
        with contextlib.redirect_stdout(null_out):
            d2s_dp2_t = pyblp_results.compute_demand_hessians(market_id=market_id)
    # compute the product of demand hessians and markups
    J = len(markups_t)
    g = np.zeros((J, J))
    for j in range(J):
        g[:, [j]] = (retailer_ownership_matrix * d2s_dp2_t[:, :, j]) @ markups_t
    # solve for derivatives of all prices with respect to the wholesale prices
    H = np.transpose(retailer_ownership_matrix * retailer_response_matrix)
    G = retailer_response_matrix + H + g
    return inv(G) @ H
def evaluate_first_order_conditions(
        index, model_type, type_ownership_matrix, response_matrix, shares, markups, custom_model_specification,
        markup_type):
    """Compute markups for some standard models including Bertrand, Cournot, monopoly, and perfect competition using
    the first order conditions corresponding to each model. Allow user to pass in their own markup function as well.

    Parameters
    ----------
    index : row indices of the products belonging to the current market.
    model_type : name of the supply model ('bertrand', 'cournot', 'monopoly', 'perfect_competition', or a custom
        model resolved via `custom_model_specification`), or None for an unmodeled upstream stage.
    type_ownership_matrix : full ownership matrix from which the market-level block is extracted.
    response_matrix : matrix of demand derivatives with respect to prices for this market.
    shares : vector of product shares for this market.
    markups : array of markups across all markets; the entries at `index` are filled in.
    custom_model_specification : optional dictionary mapping a custom model name to a formula string.
    markup_type : either 'downstream' or 'upstream'.

    Returns
    -------
    `tuple`
        The updated markups array and the market-level ownership matrix (None when `markup_type` is 'upstream' and
        no upstream model is specified).
    """
    # default to None so the return below cannot reference an unbound name when there is nothing to
    # compute (previously this raised a NameError for markup_type 'upstream' with model_type None)
    ownership_matrix = None
    if (markup_type == 'downstream') or (markup_type == 'upstream' and model_type is not None):

        # construct ownership matrix for this market, dropping columns that are entirely NaN
        ownership_matrix = type_ownership_matrix[index]
        ownership_matrix = ownership_matrix[:, ~np.isnan(ownership_matrix).all(axis=0)]

        # compute markups based on specified model first order condition
        if model_type == 'bertrand':
            markups[index] = -inv(ownership_matrix * response_matrix) @ shares
        elif model_type == 'cournot':
            markups[index] = -(ownership_matrix * inv(response_matrix)) @ shares
        elif model_type == 'monopoly':
            markups[index] = -inv(response_matrix) @ shares
        elif model_type == 'perfect_competition':
            markups[index] = np.zeros((len(shares), 1))
        else:
            if custom_model_specification is not None:
                custom_model, custom_model_formula = next(iter(custom_model_specification.items()))
                # SECURITY NOTE: eval executes an arbitrary user-supplied formula string; only pass
                # trusted custom model specifications
                markups[index] = eval(custom_model_formula)
    return markups, ownership_matrix
def read_pickle(path: Union[str, Path]) -> object:
    """Read a pickled object back into memory.

    A thin convenience wrapper around `pickle.load` that takes care of opening and closing the file.

    Parameters
    ----------
    path : `str or Path`
        File path of a pickled object.

    Returns
    -------
    `object`
        The unpickled object.
    """
    with open(path, 'rb') as pickle_file:
        loaded_object = pickle.load(pickle_file)
    return loaded_object
| 15,264 | 48.083601 | 120 | py |
pyRVtest | pyRVtest-main/pyRVtest/__init__.py | """Public-facing objects."""
from . import data, options
from .configurations.formulation import Formulation, ModelFormulation
from .construction import (
build_ownership, build_markups, construct_passthrough_matrix, evaluate_first_order_conditions, read_pickle
)
from .economies.problem import Problem
from .primitives import Models, Products
from .results.problem_results import ProblemResults
from .version import __version__
__all__ = [
'data', 'options', 'build_ownership', 'build_markups', 'construct_passthrough_matrix',
'evaluate_first_order_conditions', 'read_pickle', 'Formulation', 'ModelFormulation', 'Problem', 'Models',
'Products', 'ProblemResults', '__version__'
]
| 699 | 37.888889 | 110 | py |
pyRVtest | pyRVtest-main/pyRVtest/results/problem_results.py | """Economy-level structuring of conduct testing problem results."""
from pathlib import Path
import pickle
from typing import List, Union, TYPE_CHECKING
from pyblp.utilities.basics import Array
from .results import Results
from ..utilities.basics import format_table
# only import objects that create import cycles when checking types
if TYPE_CHECKING:
from ..economies.problem import Progress
class ProblemResults(Results):
    r"""Results of running the firm conduct testing procedures.

    Attributes
    ----------
    problem: `ndarray`
        An instance of the Problem class.
    markups: `ndarray`
        Array of the total markups implied by each model (sum of retail and wholesale markups).
    markups_downstream: `ndarray`
        Array of the retail markups implied by each model.
    markups_upstream: `ndarray`
        Array of the manufacturer markups implied by each model of double marginalization.
    taus: `ndarray`
        Array of coefficients from regressing implied marginal costs for each model on observed cost shifters.
    mc: `ndarray`
        Array of implied marginal costs for each model.
    g: `ndarray`
        Array of moments for each model and each instrument set of conduct between implied residualized cost
        unobservable and the instruments.
    Q: `ndarray`
        Array of lack of fit given by GMM objective function with 2SLS weight matrix for each set of instruments and
        each model.
    RV_numerator: `ndarray`
        Array of numerators of pairwise RV test statistics for each instrument set and each pair of models.
    RV_denominator: `ndarray`
        Array of denominators of pairwise RV test statistics for each instrument set and each pair of models.
    TRV: `ndarray`
        Array of pairwise RV test statistics for each instrument set and each pair of models.
    F: `ndarray`
        Array of pairwise F-statistics for each instrument set and each pair of models.
    MCS_pvalues: `ndarray`
        Array of MCS p-values for each instrument set and each model.
    rho: `ndarray`
        Scaling parameter for F-statistics.
    unscaled_F: `ndarray`
        Array of pairwise F-statistics without scaling by rho.
    F_cv_size_list: `ndarray`
        Vector of critical values for size for each pairwise F-statistic.
    F_cv_power_list: `ndarray`
        Vector of critical values for power for each pairwise F-statistic.
    """

    problem: Array
    markups: Array
    markups_downstream: Array
    markups_upstream: Array
    taus: Array
    mc: Array
    g: Array
    Q: Array
    RV_numerator: Array
    RV_denominator: Array
    TRV: Array
    F: Array
    MCS_pvalues: Array
    rho: Array
    unscaled_F: Array
    F_cv_size_list: Array
    F_cv_power_list: Array
    _symbols_size_list: Array
    _symbols_power_list: Array

    def __init__(self, progress: 'Progress') -> None:
        """Copy every statistic computed during testing off the progress object."""
        # pairs of (attribute on this results object, attribute on the progress object)
        field_pairs = (
            ('problem', 'problem'),
            ('markups', 'markups'),
            ('markups_downstream', 'markups_downstream'),
            ('markups_upstream', 'markups_upstream'),
            ('taus', 'tau_list'),
            ('mc', 'mc'),
            ('g', 'g'),
            ('Q', 'Q'),
            ('RV_numerator', 'RV_numerator'),
            ('RV_denominator', 'RV_denominator'),
            ('TRV', 'test_statistic_RV'),
            ('F', 'F'),
            ('MCS_pvalues', 'MCS_pvalues'),
            ('rho', 'rho'),
            ('unscaled_F', 'unscaled_F'),
            ('F_cv_size_list', 'F_cv_size_list'),
            ('F_cv_power_list', 'F_cv_power_list'),
            ('_symbols_size_list', 'symbols_size_list'),
            ('_symbols_power_list', 'symbols_power_list'),
        )
        for results_name, progress_name in field_pairs:
            setattr(self, results_name, getattr(progress, progress_name))

    def __str__(self) -> str:
        """Format results information as a string."""
        # one table per instrument set, each preceded by a blank line separator
        tables = [self._format_results_tables(i) for i in range(len(self.TRV))]
        return "".join("\n\n" + table for table in tables)

    def _format_results_tables(self, j: int) -> str:
        """Formation information about the testing results as a string."""
        number_models = len(self.markups)
        model_range = range(number_models)

        # build two rows per model: the statistics, then the size/power symbols underneath
        data: List[List[str]] = []
        for k in model_range:
            rv_row = [round(self.TRV[j][k, i], 3) for i in model_range]
            f_stat_row = [round(self.F[j][k, i], 1) for i in model_range]
            pvalue_row = [str(round(self.MCS_pvalues[j][k][0], 3))]
            symbol_row = [
                self._symbols_size_list[j][k, i] + " " + self._symbols_power_list[j][k, i] for i in model_range
            ]
            data.append([str(k)] + rv_row + [str(k)] + f_stat_row + [str(k)] + pvalue_row)
            data.append([""] + [""] * number_models + [""] + symbol_row + [""] + [""])

        # construct the header
        blanks = [f" " for i in model_range]
        numbers = [f" {i} " for i in model_range]
        header = [" TRV: "] + blanks + [" F-stats: "] + blanks + [" MCS: "] + [" "]
        subheader = [" models "] + numbers + [" models "] + numbers + [" models "] + ["MCS p-values"]

        # table notes are only attached to the final table
        last_table = j == (len(self.TRV) - 1)
        return format_table(
            header, subheader, *data, title="Testing Results - Instruments z{0}".format(j), include_notes=last_table,
            line_indices=[number_models, 2 * number_models + 1]
        )

    def to_pickle(self, path: Union[str, Path]) -> None:
        """Save these results as a pickle file. This function is copied from PyBLP.

        Parameters
        ----------
        path: `str or Path`
            File path to which these results will be saved.
        """
        with open(path, 'wb') as handle:
            pickle.dump(self, handle)
| 6,114 | 38.96732 | 120 | py |
pyRVtest | pyRVtest-main/pyRVtest/results/results.py | """Economy-level structuring of abstract BLP problem results."""
import abc
from typing import Any, Optional, TYPE_CHECKING
import numpy as np
from pyblp.utilities.basics import Array, StringRepresentation
# only import objects that create import cycles when checking types
if TYPE_CHECKING:
from ..economies.problem import ProblemEconomy
class Results(abc.ABC, StringRepresentation):
    """Abstract results of a solved BLP problem."""

    # the underlying problem these results were computed from
    problem: 'ProblemEconomy'

    def __init__(
            self, problem: 'ProblemEconomy') -> None:
        """Store the underlying problem and parameter information."""
        self.problem = problem

    def _select_market_ids(self, market_id: Optional[Any] = None) -> Array:
        """Select either a single market ID or all unique IDs.

        Raises
        ------
        ValueError
            If `market_id` is not None and is not one of the problem's unique market IDs.
        """
        if market_id is None:
            return self.problem.unique_market_ids
        if market_id in self.problem.unique_market_ids:
            # use the builtin `object` dtype: the deprecated `np.object` alias was removed in
            # NumPy 1.24 and raises an AttributeError there
            return np.array(market_id, object)
        raise ValueError(f"market_id must be None or one of {list(sorted(self.problem.unique_market_ids))}.")
pyRVtest | pyRVtest-main/pyRVtest/results/__init__.py | """Structuring of conduct testing results."""
| 46 | 22.5 | 45 | py |
pyRVtest | pyRVtest-main/pyRVtest/economies/problem.py | """Economy-level conduct testing problem functionality."""
import abc
import contextlib
import itertools
import math
import os
import time
from typing import Mapping, Optional, Sequence
import numpy as np
from pyblp.utilities.algebra import precisely_identify_collinearity
from pyblp.utilities.basics import Array, RecArray, format_seconds, output
from scipy.linalg import inv, fractional_matrix_power
from scipy.stats import norm
import statsmodels.api as sm
from .economy import Economy
from .. import options
from ..configurations.formulation import Formulation, ModelFormulation
from ..construction import build_markups
from ..primitives import Models, Products, read_critical_values_tables
from ..results.problem_results import ProblemResults
class ProblemEconomy(Economy):
    """An abstract firm conduct testing problem."""

    @abc.abstractmethod
    def __init__(
            self, cost_formulation: Formulation, instrument_formulation: Sequence[Formulation],
            model_formulations: Sequence[ModelFormulation], products: RecArray, models: RecArray,
            demand_results: Mapping, markups: RecArray) -> None:
        """Initialize the underlying economy with product and agent data."""
        super().__init__(
            cost_formulation, instrument_formulation, model_formulations, products, models, demand_results, markups
        )

    def solve(
            self, demand_adjustment: Optional[bool] = False, clustering_adjustment: Optional[bool] = False
    ) -> ProblemResults:
        r"""Solve the problem.

        Given demand estimates from PyBLP, we compute implied markups for each model :math:`m` being tested. Marginal
        cost is a linear function of observed cost shifters and an unobserved shock.

        The rest of the testing procedure is done for each pair of models, for each set of instruments. A GMM measure of
        fit is computed for each model-instrument pair. This measure of fit is used to construct the test statistic.

        Parameters
        ----------
        demand_adjustment: Optional[bool]
            (optional, default is False) Configuration that allows user to specify whether to compute a two-step demand
            adjustment. Options are True or False.
        clustering_adjustment: Optional[bool]
            (optional, default is False) Configuration that specifies whether to compute clustered standard errors.
            Options are True or False.

        Returns
        -------
        `ProblemResults`
            :class:`ProblemResults` of the solved problem.

        """
        # keep track of how long it takes to solve the problem
        output("Solving the problem ...")
        step_start_time = time.time()

        # initialize constants and precomputed values
        M = self.M
        N = self.N
        L = self.L
        markups = self.markups
        critical_values_power, critical_values_size = read_critical_values_tables()

        # validate settings
        if not isinstance(demand_adjustment, bool):
            raise TypeError("demand_adjustment must be a boolean (one of True or False).")
        if not isinstance(clustering_adjustment, bool):
            raise TypeError("clustering_adjustment must be a boolean (one of True or False).")
        if clustering_adjustment and np.shape(self.products.clustering_ids)[1] != 1:
            raise ValueError("product_data.clustering_ids must be specified with clustering_adjustment True.")
        for m in range(M):
            if self.model_formulations[m]._user_supplied_markups is not None:
                if clustering_adjustment or demand_adjustment:
                    raise ValueError(
                        "If using own markups, demand_adjustment and clustering_adjustment should be False."
                    )

        # initialize variables to be computed
        markups_upstream = np.zeros(M, dtype=options.dtype)
        markups_downstream = np.zeros(M, dtype=options.dtype)
        markups_orthogonal = np.zeros((M, N), dtype=options.dtype)
        marginal_cost_orthogonal = np.zeros((M, N), dtype=options.dtype)
        tau_list = np.zeros((M, self.products.w.shape[1]), dtype=options.dtype)
        markups_errors = np.zeros(M, dtype=options.dtype)
        marginal_cost_errors = np.zeros(M, dtype=options.dtype)

        # initialize tax-related variables
        markups_effective = [None] * M
        markups_out = [None] * M
        advalorem_tax_adj = [None] * M

        # if there are no markups, compute them
        if markups[0] is None:
            print('Computing Markups ... ')
            markups, markups_downstream, markups_upstream = build_markups(
                self.products, self.demand_results, self.models["models_downstream"],
                self.models["ownership_downstream"], self.models["models_upstream"], self.models["ownership_upstream"],
                self.models["vertical_integration"], self.models["custom_model_specification"],
                self.models["user_supplied_markups"]
            )

        # for each model, use computed markups to compute the marginal costs
        marginal_cost = self.products.prices - markups

        # for the setting with taxes, adjust the markup computation to account for marginal costs
        unit_tax = self.models["unit_tax"]
        advalorem_tax = self.models["advalorem_tax"]
        cost_scaling = self.models["cost_scaling"]
        for m in range(M):
            condition = self.models["advalorem_payer"][m] == "consumer"
            advalorem_tax_adj[m] = 1 / (1 + advalorem_tax[m]) if condition else (1 - advalorem_tax[m])
            numerator = (advalorem_tax_adj[m] * self.products.prices - advalorem_tax_adj[m] * markups[m] - unit_tax[m])
            denominator = (1 + cost_scaling[m] * advalorem_tax_adj[m])
            marginal_cost[m] = numerator / denominator
            markups_out[m] = (markups[m] + cost_scaling[m] * marginal_cost[m]) * advalorem_tax_adj[m]
            markups_effective[m] = self.products.prices - marginal_cost[m]

        # absorb any cost fixed effects from prices, markups, and instruments
        if self._absorb_cost_ids is not None:
            output("Absorbing cost-side fixed effects ...")
            self.products.w, w_errors = self._absorb_cost_ids(self.products.w)
            prices_orthogonal, prices_errors = self._absorb_cost_ids(self.products.prices)
            for m in range(M):
                value, error = self._absorb_cost_ids(markups_effective[m])
                markups_orthogonal[m] = np.squeeze(value)
                markups_errors[m] = np.nan if not error else error
                value, error = self._absorb_cost_ids(marginal_cost[m])
                marginal_cost_orthogonal[m] = np.squeeze(value)
                marginal_cost_errors[m] = np.nan if not error else error
        else:
            prices_orthogonal = self.products.prices
            markups_orthogonal = markups_effective
            marginal_cost_orthogonal = marginal_cost

        # residualize prices, markups, and instruments w.r.t cost shifters w and recover the tau parameters in cost
        #   regression on w
        results = sm.OLS(prices_orthogonal, self.products.w).fit()
        prices_orthogonal = np.reshape(results.resid, [N, 1])
        for m in range(M):
            results = sm.OLS(markups_orthogonal[m], self.products.w).fit()
            markups_orthogonal[m] = results.resid
            results = sm.OLS(marginal_cost_orthogonal[m], self.products.w).fit()
            tau_list[m] = results.params

        # if user specifies demand adjustment, account for two-step estimation in the standard errors by computing the
        #   finite difference approximation to the derivative of markups with respect to theta
        if demand_adjustment:
            ZD = self.demand_results.problem.products.ZD
            WD = self.demand_results.updated_W
            h = self.demand_results.moments
            h_i = ZD * self.demand_results.xi
            K2 = self.demand_results.problem.K2
            D = self.demand_results.problem.D

            # check whether price is in linear parameters, and if it is remove it
            XD = self.demand_results.problem.products.X1
            XD_column_names = self.demand_results.problem.products.dtype.fields['X1'][2]
            price_in_linear_parameters = 'prices' in XD_column_names
            if price_in_linear_parameters:
                XD = np.delete(XD, XD_column_names.index('prices'), 1)

            # add price to the gradient
            partial_y_theta = (np.append(
                self.demand_results.xi_by_theta_jacobian, -self.demand_results.problem.products.prices, axis=1
            ) if price_in_linear_parameters else self.demand_results.xi_by_theta_jacobian)

            # absorb fixed effects if they are specified
            if self.demand_results.problem.ED > 0:
                partial_y_theta = self.demand_results.problem._absorb_demand_ids(partial_y_theta)
                partial_y_theta = np.reshape(
                    partial_y_theta[0], [N, len(self.demand_results.theta) + int(price_in_linear_parameters)]
                )

            # if there are linear parameters, adjust them
            if not XD.shape[1]:
                partial_xi_theta = partial_y_theta
            else:
                product = XD @ inv(XD.T @ ZD @ WD @ ZD.T @ XD) @ (XD.T @ ZD @ WD @ ZD.T @ partial_y_theta)
                partial_xi_theta = partial_y_theta - product
            H = 1 / N * (np.transpose(ZD) @ partial_xi_theta)
            H_prime = np.transpose(H)
            H_prime_wd = H_prime @ WD

            # build adjustment to psi for each model
            epsilon = options.finite_differences_epsilon
            G_m = [None] * M
            gradient_markups = np.zeros(
                (M, N, len(self.demand_results.theta) + int(price_in_linear_parameters)), dtype=options.dtype
            )

            # compute sigma
            theta_index = 0
            delta_estimate = self.demand_results.delta

            def markups_computation(markups_m):
                """Compute markups for setting with taxes. NOTE: reads the enclosing loop variable `m`
                (late binding), so it must only be called from inside a loop over models."""
                denominator = (1 + cost_scaling[m] * advalorem_tax_adj[m])
                computation = (
                    advalorem_tax_adj[m] * self.products.prices - advalorem_tax_adj[m] * markups_m - unit_tax[m]
                )
                return self.products.prices - computation / denominator

            # loop over pairs of nonlinear demand characteristics, and recompute markups with perturbations if
            #   the demand coefficient estimates for sigma are not zero
            for (i, j) in itertools.product(range(K2), range(K2)):
                if not self.demand_results.sigma[i, j] == 0:
                    sigma_initial = self.demand_results.sigma[i, j]

                    # reduce sigma by small increment, update delta, and recompute markups (open os.devnull in a
                    #   with block so the file handle is closed instead of leaked)
                    self.demand_results._sigma[i, j] = sigma_initial - epsilon / 2
                    with open(os.devnull, 'w') as null_out, contextlib.redirect_stdout(null_out):
                        delta_new = self.demand_results.compute_delta()
                        self.demand_results._delta = delta_new
                    markups_l, md, ml = build_markups(
                        self.products, self.demand_results, self.models["models_downstream"],
                        self.models["ownership_downstream"], self.models["models_upstream"],
                        self.models["ownership_upstream"], self.models["vertical_integration"],
                        self.models["custom_model_specification"], self.models["user_supplied_markups"]
                    )

                    # increase sigma by small increment, update delta, and recompute markups
                    self.demand_results._sigma[i, j] = sigma_initial + epsilon / 2
                    with open(os.devnull, 'w') as null_out, contextlib.redirect_stdout(null_out):
                        delta_new = self.demand_results.compute_delta()
                        self.demand_results._delta = delta_new
                    markups_u, mu, mu = build_markups(
                        self.products, self.demand_results, self.models["models_downstream"],
                        self.models["ownership_downstream"], self.models["models_upstream"],
                        self.models["ownership_upstream"], self.models["vertical_integration"],
                        self.models["custom_model_specification"], self.models["user_supplied_markups"]
                    )

                    # compute markup perturbations for taxes
                    for m in range(M):
                        markups_u[m] = markups_computation(markups_u[m])
                        markups_l[m] = markups_computation(markups_l[m])

                    # compute first difference approximation of derivative of markups
                    gradient_markups = self._compute_first_difference_markups(
                        markups_u, markups_l, epsilon, theta_index, gradient_markups
                    )
                    self.demand_results._sigma[i, j] = sigma_initial
                    theta_index = theta_index + 1

            # loop over nonlinear demand characteristics and demographics, and recompute markups with perturbations if
            #   the demand coefficient estimates for pi are not zero
            for (i, j) in itertools.product(range(K2), range(D)):
                if not self.demand_results.pi[i, j] == 0:
                    pi_initial = self.demand_results.pi[i, j]
                    perturbations = [pi_initial - epsilon / 2, pi_initial + epsilon / 2]
                    markups_l, md, ml = self._compute_perturbation(i, j, perturbations[0])
                    markups_u, mu, mu = self._compute_perturbation(i, j, perturbations[1])
                    for m in range(M):
                        markups_u[m] = markups_computation(markups_u[m])
                        markups_l[m] = markups_computation(markups_l[m])
                    gradient_markups = self._compute_first_difference_markups(
                        markups_u, markups_l, epsilon, theta_index, gradient_markups
                    )
                    self.demand_results._pi[i, j] = pi_initial
                    theta_index = theta_index + 1
            self.demand_results._delta = delta_estimate

            # perturb alpha in negative (positive) direction and recompute markups
            price_index = [index for index, value in enumerate(self.demand_results.beta_labels) if value == 'prices']
            if price_index:
                alpha_initial = self.demand_results.beta[price_index].copy()
                self.demand_results._beta[price_index] = alpha_initial - epsilon / 2
                markups_l, md, ml = build_markups(
                    self.products, self.demand_results, self.models["models_downstream"],
                    self.models["ownership_downstream"], self.models["models_upstream"],
                    self.models["ownership_upstream"], self.models["vertical_integration"],
                    self.models["custom_model_specification"], self.models["user_supplied_markups"]
                )
                self.demand_results._beta[price_index] = alpha_initial + epsilon / 2
                markups_u, mu, mu = build_markups(
                    self.products, self.demand_results, self.models["models_downstream"],
                    self.models["ownership_downstream"], self.models["models_upstream"],
                    self.models["ownership_upstream"], self.models["vertical_integration"],
                    self.models["custom_model_specification"], self.models["user_supplied_markups"]
                )

                # compute markup perturbations for taxes
                for m in range(M):
                    markups_u[m] = markups_computation(markups_u[m])
                    markups_l[m] = markups_computation(markups_l[m])
                gradient_markups = self._compute_first_difference_markups(
                    markups_u, markups_l, epsilon, theta_index, gradient_markups
                )
                self.demand_results._beta[price_index] = alpha_initial
                theta_index = theta_index + 1

            # first differencing for the nesting parameter rho
            if len(self.demand_results.rho) != 0:
                rho_initial = self.demand_results.rho.copy()

                # perturb rho in the negative direction and recompute markups
                self.demand_results._rho = rho_initial - epsilon / 2
                with open(os.devnull, 'w') as null_out, contextlib.redirect_stdout(null_out):
                    delta_new = self.demand_results.compute_delta()
                    self.demand_results._delta = delta_new
                markups_l, md, ml = build_markups(
                    self.products, self.demand_results, self.models["models_downstream"],
                    self.models["ownership_downstream"], self.models["models_upstream"],
                    self.models["ownership_upstream"], self.models["vertical_integration"],
                    self.models["custom_model_specification"], self.models["user_supplied_markups"]
                )

                # perturb rho in the positive direction and recompute markups
                self.demand_results._rho = rho_initial + epsilon / 2
                with open(os.devnull, 'w') as null_out, contextlib.redirect_stdout(null_out):
                    delta_new = self.demand_results.compute_delta()
                    self.demand_results._delta = delta_new
                markups_u, mu, mu = build_markups(
                    self.products, self.demand_results, self.models["models_downstream"],
                    self.models["ownership_downstream"], self.models["models_upstream"],
                    self.models["ownership_upstream"], self.models["vertical_integration"],
                    self.models["custom_model_specification"], self.models["user_supplied_markups"]
                )

                # compute markup perturbations for taxes
                for m in range(M):
                    markups_u[m] = markups_computation(markups_u[m])
                    markups_l[m] = markups_computation(markups_l[m])
                gradient_markups = self._compute_first_difference_markups(
                    markups_u, markups_l, epsilon, theta_index, gradient_markups
                )
                # restore the original rho estimate (fixed: this previously assigned to the non-existent
                #   attribute `self.demand_results_`, raising an AttributeError and never restoring rho)
                self.demand_results._rho = rho_initial

        # initialize empty lists to store statistic-related values for each model
        g_list = [None] * L
        Q_list = [None] * L
        RV_numerator_list = [None] * L
        RV_denominator_list = [None] * L
        test_statistic_RV_list = [None] * L
        F_statistic_list = [None] * L
        unscaled_F_statistic_list = [None] * L
        MCS_p_values_list = [None] * L
        rho_list = [None] * L
        F_cv_size_list = [None] * L
        F_cv_power_list = [None] * L
        symbols_size_list = [None] * L
        symbols_power_list = [None] * L

        # compare models of conduct for each set of instruments
        for instrument in range(L):
            instruments = self.products["Z{0}".format(instrument)]
            K = np.shape(instruments)[1]

            # absorb any cost fixed effects from prices, markups, and instruments
            if self._absorb_cost_ids is not None:
                Z_orthogonal, Z_errors = self._absorb_cost_ids(instruments)
            else:
                Z_orthogonal = instruments
            Z_residual = sm.OLS(Z_orthogonal, self.products.w).fit().resid
            Z_orthogonal = np.reshape(Z_residual, [N, K])

            # initialize variables to store GMM measure of fit Q_m for each model
            g = np.zeros((M, K), dtype=options.dtype)
            Q = np.zeros(M, dtype=options.dtype)

            # compute the weight matrix
            W_inverse = 1 / N * (Z_orthogonal.T @ Z_orthogonal)
            W_inverse = np.reshape(W_inverse, [K, K])
            weight_matrix = inv(W_inverse)

            # for each model compute GMM measure of fit
            for m in range(M):
                g[m] = 1 / N * (Z_orthogonal.T @ (np.squeeze(prices_orthogonal) - markups_orthogonal[m]))
                Q[m] = g[m].T @ weight_matrix @ g[m]

            # compute the pairwise RV numerator
            test_statistic_numerator = np.zeros((M, M))
            for m in range(M):
                for i in range(m):
                    if i < m:
                        test_statistic_numerator[i, m] = math.sqrt(N) * (Q[i] - Q[m])

            # initialize the RV test statistic denominator and construct weight matrices
            test_statistic_denominator = np.zeros((M, M))
            covariance_mc = np.zeros((M, M))
            W_12 = fractional_matrix_power(weight_matrix, 0.5)
            W_34 = fractional_matrix_power(weight_matrix, 0.75)

            # compute psi, which is used in the estimator of the covariance between weighted moments
            psi = np.zeros((M, N, K), dtype=options.dtype)
            if demand_adjustment:
                adjustment_value = np.zeros((M, K, H_prime_wd.shape[1]), dtype=options.dtype)
            for m in range(M):
                psi_bar = W_12 @ g[m] - .5 * W_34 @ W_inverse @ W_34 @ g[m]
                W_34_Zg = Z_orthogonal @ W_34 @ g[m]
                W_34_Zg = W_34_Zg[:, np.newaxis]
                marginal_cost_orthogonal = (np.squeeze(prices_orthogonal) - markups_orthogonal[m])
                marginal_cost_orthogonal = marginal_cost_orthogonal[:, np.newaxis]
                psi_i = (marginal_cost_orthogonal * Z_orthogonal) @ W_12 - 0.5 * W_34_Zg * (Z_orthogonal @ W_34.T)
                psi[m] = psi_i - np.transpose(psi_bar)

                # make a demand adjustment
                if demand_adjustment:
                    G_k = -1 / N * np.transpose(Z_orthogonal) @ gradient_markups[m]
                    G_m[m] = G_k
                    adjustment_value[m] = W_12 @ G_m[m] @ inv(H_prime_wd @ H) @ H_prime_wd
                    psi[m] = psi[m] - (h_i - np.transpose(h)) @ np.transpose(adjustment_value[m])

            # initialize model confidence set containers
            model_confidence_set = np.array(range(M))
            all_model_combinations = list(itertools.combinations(model_confidence_set, 2))
            number_model_combinations = np.shape(all_model_combinations)[0]
            model_confidence_set_variance = np.zeros([number_model_combinations, 1])

            # compute the RV test statistic denominator
            for m in range(M):
                for i in range(m):
                    if i < m:
                        variance_covariance = self._compute_variance_covariance(m, i, N, clustering_adjustment, psi)
                        weighted_variance = W_12 @ variance_covariance @ W_12
                        operations = np.array([1, 1, -2])
                        moments = np.array([
                            g[i].T @ weighted_variance[0] @ g[i],
                            g[m].T @ weighted_variance[1] @ g[m],
                            g[i].T @ weighted_variance[2] @ g[m]
                        ]).flatten()
                        sigma_squared = 4 * (operations.T @ moments)

                        # compute the covariance matrix for marginal costs
                        covariance_mc[i, m] = moments[2]
                        covariance_mc[m, i] = covariance_mc[i, m]
                        covariance_mc[m, m] = moments[1]
                        covariance_mc[i, i] = moments[0]
                        test_statistic_denominator[i, m] = math.sqrt(sigma_squared)

            # compute the pairwise RV test statistic
            rv_test_statistic = np.zeros((M, M))
            for (m, i) in itertools.product(range(M), range(M)):
                if i < m:
                    rv_test_statistic[i, m] = test_statistic_numerator[i, m] / test_statistic_denominator[i, m]
                else:
                    # only the upper triangle is defined; use np.nan directly instead of the string "NaN"
                    rv_test_statistic[i, m] = np.nan

            # compute the pairwise F-statistic for each model
            unscaled_F = np.zeros((M, M))
            F = np.zeros((M, M))
            pi = np.zeros((K, M))
            phi = np.zeros([M, N, K])
            rho = np.zeros((M, M))
            F_cv_size = np.empty((M, M), dtype=object)
            F_cv_power = np.empty((M, M), dtype=object)
            symbols_size = np.empty((M, M), dtype=object)
            symbols_power = np.empty((M, M), dtype=object)
            for m in range(M):
                ols_results = sm.OLS(np.squeeze(prices_orthogonal) - markups_orthogonal[m], Z_orthogonal).fit()
                pi[:, m] = ols_results.params
                e = np.reshape(ols_results.resid, [N, 1])
                phi[m] = (e * Z_orthogonal) @ weight_matrix
                if demand_adjustment:
                    phi[m] = phi[m] - (h_i - np.transpose(h)) @ np.transpose(W_12 @ adjustment_value[m])

            # compute the F statistic for each pair of models
            for (m, i) in itertools.product(range(M), range(M)):
                if i < m:
                    variance = self._compute_variance_covariance(m, i, N, clustering_adjustment, phi)
                    sigma = 1 / K * np.array([
                        np.trace(variance[0] @ W_inverse), np.trace(variance[1] @ W_inverse),
                        np.trace(variance[2] @ W_inverse)
                    ])
                    numerator_sqrt = (sigma[0] - sigma[1])
                    denominator_sqrt = np.sqrt((sigma[0] + sigma[1]) * (sigma[0] + sigma[1]) - 4 * sigma[2] ** 2)
                    rho[i, m] = numerator_sqrt / denominator_sqrt
                    rho_squared = np.square(rho[i, m])

                    # construct F statistic
                    operations = np.array([sigma[1], sigma[0], -2 * sigma[2]])
                    moments = np.array([
                        g[i].T @ weight_matrix @ g[i],
                        g[m].T @ weight_matrix @ g[m],
                        g[i].T @ weight_matrix @ g[m]
                    ]).flatten()
                    F_numerator = operations @ moments
                    F_denominator = (sigma[0] * sigma[1] - sigma[2] ** 2)
                    unscaled_F[i, m] = N / (2 * K) * F_numerator / F_denominator
                    F[i, m] = (1 - rho_squared) * N / (2 * K) * F_numerator / F_denominator

                    # pull out critical values for size and power
                    rho_lookup = np.round(np.abs(rho[i, m]), 2)
                    if rho_lookup > .99:
                        rho_lookup = .99
                    ind = np.where((critical_values_size['K'] == K) & (critical_values_size['rho'] == rho_lookup))[0][0]
                    F_cv_size[i, m] = np.array([
                        critical_values_size['r_125'][ind],
                        critical_values_size['r_10'][ind],
                        critical_values_size['r_075'][ind]
                    ], dtype=object)
                    F_cv_power[i, m] = np.array([
                        critical_values_power['r_50'][ind],
                        critical_values_power['r_75'][ind],
                        critical_values_power['r_95'][ind]
                    ], dtype=object)

                    # determine F-stat critical values for size
                    if F[i, m] < F_cv_size[i, m][0]:
                        symbols_size[i, m] = " "
                    elif F[i, m] < F_cv_size[i, m][1]:
                        symbols_size[i, m] = "*"
                    elif F[i, m] < F_cv_size[i, m][2]:
                        symbols_size[i, m] = "**"
                    else:
                        symbols_size[i, m] = "***"

                    # determine F-stat critical values for power
                    if F[i, m] < F_cv_power[i, m][0]:
                        symbols_power[i, m] = " "
                    elif F[i, m] < F_cv_power[i, m][1]:
                        symbols_power[i, m] = "^"
                    elif F[i, m] < F_cv_power[i, m][2]:
                        symbols_power[i, m] = "^^"
                    else:
                        symbols_power[i, m] = "^^^"
                if i >= m:
                    F[i, m] = np.nan
                    symbols_size[i, m] = ""
                    symbols_power[i, m] = ""

            # compute the sigma model confidence set
            sigma_model_confidence_set = np.zeros([number_model_combinations, number_model_combinations])
            for index_i, model_i in enumerate(all_model_combinations):
                model_confidence_set_variance[index_i] = test_statistic_denominator[model_i[0], model_i[1]] / 2
                for index_j, model_j in enumerate(all_model_combinations):
                    term1 = covariance_mc[model_i[0], model_j[0]] - covariance_mc[model_i[1], model_j[0]]
                    term2 = covariance_mc[model_i[0], model_j[1]] - covariance_mc[model_i[1], model_j[1]]
                    sigma_model_confidence_set[index_j, index_i] = term1 - term2
            denominator = model_confidence_set_variance @ model_confidence_set_variance.T
            sigma_model_confidence_set = sigma_model_confidence_set / denominator

            # construct the model confidence set by iterating through all model pairs and comparing their test
            #   statistics
            converged = False
            model_confidence_set_pvalues = np.ones([M, 1])
            while not converged:

                # if we are on the last pair of models, use the model of worst fit to compute the p-value
                if np.shape(model_confidence_set)[0] == 2:
                    max_test_statistic = rv_test_statistic[model_confidence_set[0], model_confidence_set[1]]
                    if np.sign(max_test_statistic) >= 0:
                        worst_fit = model_confidence_set[0]
                        max_test_statistic = -max_test_statistic
                    else:
                        worst_fit = model_confidence_set[1]
                    model_confidence_set_pvalues[worst_fit] = 2 * norm.cdf(max_test_statistic)
                    converged = True
                else:
                    model_1 = []
                    model_2 = []
                    current_combinations = list(itertools.combinations(model_confidence_set, 2))
                    number_model_combinations = np.shape(current_combinations)[0]
                    sigma_index = np.empty(number_model_combinations, dtype=int)

                    # for each pair of models, find the RV test statistic and the max test statistic among the model
                    #   pairs
                    for model_pair in range(number_model_combinations):
                        model_1.append(current_combinations[model_pair][0])
                        model_2.append(current_combinations[model_pair][1])
                        sigma_index[model_pair] = all_model_combinations.index(current_combinations[model_pair])
                    test_statistic_model_confidence_set = rv_test_statistic[model_1, model_2]
                    index = np.argmax(abs(test_statistic_model_confidence_set))
                    max_test_statistic = test_statistic_model_confidence_set[index]

                    # find the model with the worst fit and remove it from the comparison set
                    if np.sign(max_test_statistic) >= 0:
                        worst_fit = model_1[index]
                    else:
                        worst_fit = model_2[index]
                        max_test_statistic = -max_test_statistic
                    mean = np.zeros([np.shape(current_combinations)[0]])
                    cov = sigma_model_confidence_set[sigma_index[:, None], sigma_index]
                    simulated_test_statistics = np.random.multivariate_normal(mean, cov, options.ndraws)
                    max_simulated_statistic = np.amax(abs(simulated_test_statistics), 1)
                    model_confidence_set_pvalues[worst_fit] = np.mean(max_simulated_statistic > max_test_statistic)
                    model_confidence_set = np.delete(model_confidence_set, np.where(model_confidence_set == worst_fit))

            # update the output list
            g_list[instrument] = g
            Q_list[instrument] = Q
            RV_numerator_list[instrument] = test_statistic_numerator
            RV_denominator_list[instrument] = test_statistic_denominator
            test_statistic_RV_list[instrument] = rv_test_statistic
            F_statistic_list[instrument] = F
            unscaled_F_statistic_list[instrument] = unscaled_F
            MCS_p_values_list[instrument] = model_confidence_set_pvalues
            rho_list[instrument] = rho
            F_cv_size_list[instrument] = F_cv_size
            F_cv_power_list[instrument] = F_cv_power
            symbols_size_list[instrument] = symbols_size
            symbols_power_list[instrument] = symbols_power

        # return results
        results = ProblemResults(Progress(
            self, markups, markups_downstream, markups_upstream, marginal_cost, tau_list, g_list, Q_list,
            RV_numerator_list, RV_denominator_list, test_statistic_RV_list, F_statistic_list, MCS_p_values_list,
            rho_list, unscaled_F_statistic_list, F_cv_size_list, F_cv_power_list, symbols_size_list,
            symbols_power_list
        ))
        step_end_time = time.time()
        total_time = step_end_time - step_start_time
        print('Total Time is ... ' + str(total_time))
        output("")
        output(results)
        return results

    def _compute_first_difference_markups(self, markups_u, markups_l, epsilon, theta_index, gradient_markups):
        """Compute first differences and return the gradient."""
        for m in range(self.M):
            diff_markups = (markups_u[m] - markups_l[m]) / epsilon
            if self._absorb_cost_ids is not None:
                diff_markups, me = self._absorb_cost_ids(diff_markups)
            ols_result = sm.OLS(diff_markups, self.products.w).fit()
            gradient_markups[m][:, theta_index] = ols_result.resid
        return gradient_markups

    def _compute_perturbation(self, i, j, perturbation):
        """Perturb pi and recompute markups."""
        self.demand_results._pi[i, j] = perturbation
        # open os.devnull in a with block so the file handle is closed instead of leaked
        with open(os.devnull, 'w') as null_out, contextlib.redirect_stdout(null_out):
            delta_new = self.demand_results.compute_delta()
            self.demand_results._delta = delta_new
        return build_markups(
            self.products, self.demand_results, self.models["models_downstream"], self.models["ownership_downstream"],
            self.models["models_upstream"], self.models["ownership_upstream"], self.models["vertical_integration"],
            self.models["custom_model_specification"], self.models["user_supplied_markups"]
        )

    def _compute_variance_covariance(self, m, i, N, se_type, var):
        """Compute the variance covariance matrix for models m and i, optionally with a clustered adjustment."""
        variance_covariance = 1 / N * np.array([
            var[i].T @ var[i], var[m].T @ var[m], var[i].T @ var[m]
        ])
        # accept both the boolean `clustering_adjustment` flag passed by solve() and the legacy string
        #   'clustered': the original comparison `se_type == 'clustered'` was never True for a boolean
        #   argument, so the clustered adjustment was silently skipped
        if se_type is True or se_type == 'clustered':
            cluster_ids = np.unique(self.products.clustering_ids)
            for j in cluster_ids:
                index = np.where(self.products.clustering_ids == j)[0]
                var1_l = var[i][index, :]
                var2_l = var[m][index, :]
                var1_c = var1_l
                var2_c = var2_l

                # update the matrix
                for k in range(len(index) - 1):
                    var1_c = np.roll(var1_c, 1, axis=0)
                    var2_c = np.roll(var2_c, 1, axis=0)
                    update = 1 / N * np.array([
                        var1_l.T @ var1_c, var2_l.T @ var2_c, var1_l.T @ var2_c
                    ])
                    variance_covariance = variance_covariance + update
        return variance_covariance
class Problem(ProblemEconomy):
    r"""A firm conduct testing-type problem.

    This class is initialized using the relevant data and formulations, and solved with :meth:`Problem.solve`.

    Parameters
    ----------
    cost_formulation: `Formulation`
        :class:`Formulation` is a list of the variables for observed product characteristics. All observed cost shifters
        included in this formulation must be variables in the `product_data`. To use a constant, one would replace `0`
        with `1`. To absorb fixed effects, specify `absorb = 'C(variable)'`, where the `variable` must also be in the
        `product_data`. Including this option implements fixed effects absorption using
        [PYHDFE](https://github.com/jeffgortmaker/pyhdfe), a companion package to PyBLP.
    instrument_formulation: `Formulation or sequence of Formulation`
        :class:`Formulation` is list of the variables used as excluded instruments for testing. For each instrument
        formulation, there should never be a constant. The user can specify as many instrument formulations as desired.
        All instruments must be variables in `product_data`.

        .. note::
            **Our instrument naming conventions differ from PyBLP**. With PyBLP, one specifies the excluded instruments
            for demand estimation via a naming convention in the product_data: each excluded instrument for demand
            estimation begins with `"demand_instrument"` followed by a number ( i.e., `demand_instrument0`). In
            pyRVtest, you specify directly the names of the variables in the `product_data` that you want to use as
            excluded instruments for testing (i.e., if you want to test with one instrument using the variable in the
            `product_data` named, "transportation_cost" one could specify
            `pyRVtest.Formulation('0 + transportation_cost')`.

    product_data: `structured array-like`
        This is the data containing product and market observable characteristics, as well as instruments.
    demand_results: `structured array-like`
        The results object returned by `pyblp.solve`.
    model_formulations: `sequence of ModelFormulation, optional`
        :class:`ModelFormulation` defines the models that the researcher wants to test. There must be at least two
        instances of `ModelFormulation` specified to run the firm conduct testing procedure. Required unless
        `markup_data` is specified instead.
    markup_data: `structured array-like, optional`
        Markups supplied directly by the user, one row per model. When specified, model formulations are not used
        and markups are not computed internally.
    """

    def __init__(
            self, cost_formulation: Formulation, instrument_formulation: Sequence[Formulation],
            product_data: Mapping, demand_results: Mapping, model_formulations: Sequence[ModelFormulation] = None,
            markup_data: Optional[RecArray] = None) -> None:
        """Initialize the underlying economy with product and agent data before absorbing fixed effects."""

        # keep track of how long it takes to initialize the problem
        output("Initializing the problem ...")
        start_time = time.time()

        # the number of models comes from the model formulations unless markups were supplied directly
        if markup_data is None:
            M = len(model_formulations)
        else:
            M = np.shape(markup_data)[0]

        # count the instrument formulations (a single Formulation is treated as L = 1)
        if hasattr(instrument_formulation, '__len__'):
            L = len(instrument_formulation)
        else:
            L = 1

        # validate and normalize cost formulation
        if not isinstance(cost_formulation, Formulation):
            raise TypeError("cost_formulation must be a single Formulation instance.")

        # validate instrument formulation
        if L == 1:
            if not isinstance(instrument_formulation, Formulation):
                raise TypeError("instrument_formulation must be a single Formulation instance.")
        elif L > 1:
            if not all(isinstance(f, Formulation) for f in instrument_formulation):
                raise TypeError("Each formulation in instrument_formulation must be a Formulation.")

        # initialize the underlying economy with structured product and cost data
        products = Products(
            cost_formulation=cost_formulation, instrument_formulation=instrument_formulation, product_data=product_data
        )
        if markup_data is None:
            models = Models(model_formulations=model_formulations, product_data=product_data)
            markups = [None] * M
        else:
            models = None
            markups = markup_data
        super().__init__(
            cost_formulation, instrument_formulation, model_formulations, products, models, demand_results, markups
        )

        # check cost shifters, and each [w, z] stack, for collinearity
        if max(options.collinear_atol, options.collinear_rtol) > 0:
            cost_shifters = self.products.w
            common_message = "To disable collinearity checks, set options.collinear_atol = options.collinear_rtol = 0."
            collinear, successful = precisely_identify_collinearity(cost_shifters)
            if not successful:
                raise ValueError(
                    f"Failed to compute the QR decomposition of w while checking for collinearity issues. "
                    f"{common_message}"
                )
            if collinear.any():
                raise ValueError(
                    f"Detected collinearity issues with w. "
                    f"{common_message}"
                )
            for instrument in range(self.L):
                cost_shifters = self.products.w
                cost_shifters = np.append(cost_shifters, self.products["Z{0}".format(instrument)], axis=1)
                collinear, successful = precisely_identify_collinearity(cost_shifters)
                if not successful:
                    raise ValueError(
                        f"Failed to compute the QR decomposition of [w,z" + str(instrument) + "] while checking for "
                        f"collinearity issues. "
                        f"{common_message}"
                    )
                if collinear.any():
                    # note the trailing space: the original message ran "]." directly into the next sentence
                    raise ValueError(
                        f"Detected collinearity issues with [w,z" + str(instrument) + "]. "
                        f"{common_message}"
                    )

        # output information about the initialized problem
        output(f"Initialized the problem after {format_seconds(time.time() - start_time)}.")
        output("")
        output(self)
class Progress(object):
    """Structured information about estimation progress.

    This is a plain container: it bundles the markups, marginal costs, and testing statistics produced while
    solving a problem so they can be passed to :class:`ProblemResults` as a single object.
    """

    # annotations mirror the __init__ arguments stored below (note the two renames:
    # `taus` is stored as `tau_list` and `RV_denom` as `RV_denominator`)
    problem: ProblemEconomy
    markups: Array
    markups_downstream: Array
    markups_upstream: Array
    tau_list: Array
    mc: Array
    g: Array
    Q: Array
    RV_numerator: Array
    RV_denominator: Array
    test_statistic_RV: Array
    F: Array
    MCS_pvalues: Array
    rho: Array
    unscaled_F: Array
    F_cv_size_list: Array
    F_cv_power_list: Array
    symbols_size_list: Array
    symbols_power_list: Array

    def __init__(
            self, problem: ProblemEconomy, markups: Array, markups_downstream: Array, markups_upstream: Array,
            mc: Array, taus: Array, g: Array, Q: Array, RV_numerator: Array, RV_denom: Array, test_statistic_RV: Array,
            F: Array, MCS_pvalues: Array, rho: Array, unscaled_F: Array, F_cv_size_list: Array,
            F_cv_power_list: Array, symbols_size_list: Array, symbols_power_list: Array) -> None:
        """Store the progress information.

        No computation happens here: every argument is assigned to an attribute of the same (or closely related)
        name.  (The previous docstring's claim about projected gradients and reduced Hessians was carried over from
        PyBLP and did not describe this method.)
        """
        self.problem = problem
        self.markups = markups
        self.markups_downstream = markups_downstream
        self.markups_upstream = markups_upstream
        self.tau_list = taus
        self.mc = mc
        self.g = g
        self.Q = Q
        self.RV_numerator = RV_numerator
        self.RV_denominator = RV_denom
        self.test_statistic_RV = test_statistic_RV
        self.F = F
        self.MCS_pvalues = MCS_pvalues
        self.rho = rho
        self.unscaled_F = unscaled_F
        self.F_cv_size_list = F_cv_size_list
        self.F_cv_power_list = F_cv_power_list
        self.symbols_size_list = symbols_size_list
        self.symbols_power_list = symbols_power_list
| 44,071 | 50.971698 | 120 | py |
pyRVtest | pyRVtest-main/pyRVtest/economies/economy.py | """Economy underlying the firm conduct testing model."""
import abc
from typing import Any, Dict, Hashable, List, Mapping, Optional, Sequence, Tuple, Union
import numpy as np
from pyblp.utilities.basics import Array, RecArray, StringRepresentation, format_table, get_indices
from ..configurations.formulation import Formulation, Absorb, ModelFormulation
from ..primitives import Container
class Economy(Container, StringRepresentation):
    """An abstract economy underlying the firm conduct testing model.

    Concrete subclasses (such as ``Problem``) call :meth:`__init__` via ``super()`` to store formulations and data
    and to count the dimensions of the testing environment.
    """

    model_formulations: Sequence[Optional[ModelFormulation]]
    cost_formulation: Formulation
    instrument_formulation: Formulation
    markups: RecArray
    unique_market_ids: Array
    unique_nesting_ids: Array
    unique_product_ids: Array
    T: int
    N: int
    # mapping from "K{l}" to the number of instruments in set l; the class-level default is kept for backwards
    # compatibility, but each instance gets its own dict in __init__ (mutating the shared class-level dict meant
    # instrument counts leaked between Economy instances)
    Dict_K: Dict[str, int] = {}
    M: int
    EC: int
    H: int
    L: int
    _market_indices: Dict[Hashable, int]
    _product_market_indices: Dict[Hashable, Array]
    _max_J: int
    _absorb_cost_ids: Optional[Absorb]

    @abc.abstractmethod
    def __init__(
            self, cost_formulation: Formulation, instrument_formulation: Formulation,
            model_formulations: Sequence[Optional[ModelFormulation]],
            products: RecArray, models: RecArray, demand_results: Mapping, markups: RecArray) -> None:
        """Store information about formulations and data. Any fixed effects should be absorbed after initialization."""

        # store data and formulations
        super().__init__(products, models)
        self.cost_formulation = cost_formulation
        self.instrument_formulation = instrument_formulation
        self.model_formulations = model_formulations
        self.demand_results = demand_results
        self.markups = markups

        # identify unique markets, nests, products, and agents
        self.unique_market_ids = np.unique(self.products.market_ids.flatten())
        self.unique_nesting_ids = np.unique(self.products.nesting_ids.flatten())
        self.unique_product_ids = np.unique(self.products.product_ids.flatten())

        # count dimensions
        self.N = self.products.shape[0]
        self.T = self.unique_market_ids.size
        self.L = len(self.instrument_formulation) if hasattr(self.instrument_formulation, '__len__') else 1

        # store the number of instruments in each set on a per-instance dict (fixes the shared class-level dict)
        self.Dict_K = {}
        for instrument in range(self.L):
            self.Dict_K.update({"K{0}".format(instrument): self.products["Z{0}".format(instrument)].shape[1]})
        self.M = len(self.model_formulations) if self.markups[0] is None else np.shape(self.markups)[0]
        self.EC = self.products.cost_ids.shape[1]
        self.H = self.unique_nesting_ids.size

        # identify market indices
        self._market_indices = {t: i for i, t in enumerate(self.unique_market_ids)}
        self._product_market_indices = get_indices(self.products.market_ids)

        # identify the largest number of products and agents in a market
        self._max_J = max(i.size for i in self._product_market_indices.values())

        # construct fixed effect absorption functions
        self._absorb_cost_ids = None
        if self.EC > 0:
            assert cost_formulation is not None
            self._absorb_cost_ids = cost_formulation._build_absorb(self.products.cost_ids)

    def __str__(self) -> str:
        """Format economy information as a string."""
        return "\n\n".join([self._format_dimensions(), self._format_formulations(), self._format_model_formulations()])

    def _format_dimensions(self) -> str:
        """Format information about the nonzero dimensions of the economy as a string."""
        header: List[str] = []
        values: List[str] = []
        for key in ['T', 'N', 'M', 'L']:
            value = getattr(self, key)
            if value > 0:
                header.append(f" {key} ")
                values.append(str(value))
        # one column per instrument set, labeled with its number of instruments
        for instrument in range(self.L):
            header.append("d_z{0}".format(instrument))
            values.append(str(self.Dict_K["K{0}".format(instrument)]))
        return format_table(header, values, title="Dimensions")

    def _format_formulations(self) -> str:
        """Format information about the formulations of the economy as a string."""

        # construct the data: the cost formulation plus one row per instrument set
        named_formulations = [(self._w_formulation, "w: Marginal Cost")]
        for instruments in range(self.L):
            named_formulations.append((
                self.Dict_Z_formulation["_Z{0}_formulation".format(instruments)],
                "z{0}: Instruments".format(instruments)
            ))
        data: List[List[str]] = []
        for formulations, name in named_formulations:
            if any(formulations):
                data.append([name] + [str(f) for f in formulations])

        # construct the header from the widest row
        max_formulations = max(len(r[1:]) for r in data)
        header = ["Column Indices:"] + [f" {i} " for i in range(max_formulations)]
        return format_table(header, *data, title="Formulations")

    def _format_model_formulations(self) -> str:
        """Format information about the model formulations as a string."""

        # construct the data: one column per model when markups are computed, otherwise a single note
        data: List[List[str]] = []
        if self.markups[0] is None:
            data.append(["Model - Downstream"] + [self.models["models_downstream"][i] for i in range(self.M)])
            data.append(["Model - Upstream"] + [self.models["models_upstream"][i] for i in range(self.M)])
            data.append(["Firm IDs - Downstream"] + [self.models["firm_ids_downstream"][i] for i in range(self.M)])
            data.append(["Firm IDs - Upstream"] + [self.models["firm_ids_upstream"][i] for i in range(self.M)])
            data.append(["VI Index"] + [self.models["vertical_integration_index"][i] for i in range(self.M)])
            data.append(["Cost Scaling Column"] + [self.models["cost_scaling_column"][i] for i in range(self.M)])
            data.append(["Unit Tax"] + [self.models["unit_tax_name"][i] for i in range(self.M)])
            data.append(["Advalorem Tax"] + [self.models["advalorem_tax_name"][i] for i in range(self.M)])
            data.append(["Advalorem Payer"] + [self.models["advalorem_payer"][i] for i in range(self.M)])
            data.append(
                ["User Supplied Markups"] + [self.models["user_supplied_markups_name"][i] for i in range(self.M)]
            )
            header = [" "] + [f" {i} " for i in range(self.M)]
        else:
            data.append(["Markups Supplied by User"])
            header = [" "]
        return format_table(header, *data, title="Models")
| 6,566 | 45.574468 | 119 | py |
pyRVtest | pyRVtest-main/pyRVtest/economies/__init__.py | """Economies underlying the conduct testing model."""
| 54 | 26.5 | 53 | py |
pyRVtest | pyRVtest-main/pyRVtest/utilities/basics.py | """Basic functionality."""
from typing import Any, Container, Dict, List, Optional, Sequence, Tuple
# define common type aliases; arrays and structured records are deliberately typed as Any
Array = Any
RecArray = Any
Data = Dict[str, Array]
Options = Dict[str, Any]
Bounds = Tuple[Array, Array]
# define a pool managed by parallel and used by generate_items
# NOTE(review): neither `parallel` nor `generate_items` is defined in this module chunk -- the comment appears to
# be carried over from pyblp; confirm against the rest of the package.
pool = None
def _stack_label_rows(cells: Sequence) -> List[List[str]]:
    """Expand header cells (strings or lists of strings) into bottom-aligned rows of labels."""
    columns = [[c] if isinstance(c, str) else c for c in cells]
    rows: List[List[str]] = []
    depth = 1
    while True:
        row = ["" if len(c) < depth else c[-depth] for c in columns]
        if not any(row):
            break
        rows.insert(0, row)
        depth += 1
    return rows


def format_table(
        header: Sequence, subheader: Sequence, *data: Sequence, title: Optional[str] = None,
        include_notes: bool = False, include_border: bool = True, include_header: bool = True,
        include_subheader: bool = True, line_indices: Container[int] = ()) -> str:
    """Format table information as a string.

    Columns have fixed widths, vertical lines are drawn after any specified indices, and the table optionally
    includes a title, border, header, sub-header, and explanatory notes about the F-statistic diagnostics.

    Parameters
    ----------
    header : `Sequence`
        Column labels; each element is a string or a list of strings stacked over multiple rows.
    subheader : `Sequence`
        Secondary column labels with the same structure as ``header``.
    *data : `Sequence`
        Rows of table cells; short rows are right-padded with empty cells.
    title : `str, optional`
        Title printed above the table, followed by a colon.
    include_notes : `bool, optional`
        Whether to append the testing diagnostic notes below the table body.
    include_border : `bool, optional`
        Whether to draw ``=`` borders above and below the table.
    include_header : `bool, optional`
        Whether to print the header rows.
    include_subheader : `bool, optional`
        Whether to print the sub-header rows.
    line_indices : `Container[int], optional`
        Column indices after which a vertical line is drawn.

    Returns
    -------
    `str`
        The formatted table.
    """

    # construct the header, sub-header, and data rows (short data rows are padded to the header width)
    header = [[c] if isinstance(c, str) else c for c in header]
    header_rows = _stack_label_rows(header)
    subheader_rows = _stack_label_rows(subheader)
    data_rows = [[str(c) for c in r] + [""] * (len(header) - len(r)) for r in data]

    # compute fixed column widths from every label and cell
    widths = []
    for column_index in range(len(header)):
        widths.append(max(len(r[column_index]) for r in header_rows + subheader_rows + data_rows))

    # build the row templates (the notes template replaces vertical lines with spaces)
    template = " ".join("{{:^{}}}{}".format(w, " |" if i in line_indices else "") for i, w in enumerate(widths))
    template_notes = " ".join("{{:^{}}}{}".format(w, " " if i in line_indices else "") for i, w in enumerate(widths))

    # assemble the table lines
    lines = []
    if title is not None:
        lines.append(f"{title}:")
    if include_border:
        lines.append("=" * len(template.format(*[""] * len(widths))))
    if include_header:
        lines.extend(template.format(*r) for r in header_rows)
        lines.append(template.format(*("-" * w for w in widths)))
    if include_subheader:
        lines.extend(template.format(*r) for r in subheader_rows)
        lines.append(template.format(*("-" * w for w in widths)))
    lines.extend(template.format(*r) for r in data_rows)
    if include_border:
        lines.append("=" * len(template.format(*[""] * len(widths))))

    # optionally append the diagnostic notes
    if include_notes:
        notes: List[List[str]] = [
            ['Significance of size and power diagnostic reported below each F-stat'],
            ['*, **, or *** indicate that F > cv for a worst-case size of 0.125, 0.10, and 0.075 given d_z and rho'],
            # fixed typo: the third power marker is three carets, matching the three thresholds
            ['^, ^^, or ^^^ indicate that F > cv for a best-case power of 0.50, 0.75, and 0.95 given d_z and rho'],
            [
                'appropriate critical values for size are stored in the variable F_cv_size_list of the pyRVtest '
                'results class'
            ],
            [
                'appropriate critical values for power are stored in the variable F_cv_power_list of the pyRVtest '
                'results class'
            ],
        ]
        notes_rows = [[str(c) for c in r] + [""] * (len(header) - len(r)) for r in notes]
        lines.extend(template_notes.format(*r) for r in notes_rows)
        lines.append("=" * len(template_notes.format(*[""] * len(widths))))
    return "\n".join(lines)
| 3,811 | 39.126316 | 120 | py |
pyRVtest | pyRVtest-main/pyRVtest/utilities/__init__.py | """General functionality."""
| 29 | 14 | 28 | py |
pyRVtest | pyRVtest-main/pyRVtest/configurations/formulation.py | """Formulation of data matrices and absorption of fixed effects."""
import token
from typing import Any, Callable, Dict, List, Mapping, Optional, Set, Tuple, Type, Union
import numpy as np
import patsy
import patsy.builtins
import patsy.contrasts
import patsy.desc
import patsy.design_info
import patsy.origin
from pyblp.utilities.basics import (Array, Data, StringRepresentation, extract_size, interact_ids)
from pyblp.configurations.formulation import (
Absorb, ColumnFormulation, CategoricalTreatment, parse_terms, design_matrix, parse_term_expression
)
import sympy as sp
import sympy.parsing.sympy_parser
class Formulation(StringRepresentation):
    r"""Configuration for designing matrices and absorbing fixed effects.
    .. note::
        This class is a copy of the Formulation class from PyBLP.
    Internally, the `patsy <https://patsy.readthedocs.io/en/stable/>`_ package is used to convert data and R-style
    formulas into matrices. All of the standard
    `binary operators <https://patsy.readthedocs.io/en/stable/formulas.html#operators>`_ can be used to design complex
    matrices of factor interactions:
    - ``+`` - Set union of terms.
    - ``-`` - Set difference of terms.
    - ``*`` - Short-hand. The formula ``a * b`` is the same as ``a + b + a:b``.
    - ``/`` - Short-hand. The formula ``a / b`` is the same as ``a + a:b``.
    - ``:`` - Interactions between two sets of terms.
    - ``**`` - Interactions up to an integer degree.
    However, since factors need to be differentiated (for example, when computing elasticities), only the most essential
    functions are supported:
    - ``C`` - Mark a variable as categorical. See :func:`patsy.builtins.C`. Arguments are not supported.
    - ``I`` - Encapsulate mathematical operations. See :func:`patsy.builtins.I`.
    - ``log`` - Natural logarithm function.
    - ``exp`` - Natural exponential function.
    Data associated with variables should generally already be transformed. However, when encapsulated by ``I()``, these
    operators function like normal mathematical operators on numeric variables: ``+`` adds, ``-`` subtracts, ``*``
    multiplies, ``/`` divides, and ``**`` exponentiates.
    Internally, mathematical operations are parsed and evaluated by the `SymPy <https://www.sympy.org/en/index.html>`_
    package, which is also used to symbolically differentiate terms when derivatives are needed.
    Parameters
    ----------
    formula : `str`
        R-style formula used to design a matrix. Variable names will be validated when this formulation and data are
        passed to a function that uses them. By default, an intercept is included, which can be removed with ``0`` or
        ``-1``. If ``absorb`` is specified, intercepts are ignored.
    absorb : `str, optional`
        R-style formula used to design a matrix of categorical variables representing fixed effects, which will be
        absorbed into the matrix designed by ``formula`` by the `PyHDFE <https://pyhdfe.readthedocs.io/en/stable/>`_
        package. Fixed effect absorption is only supported for some matrices. Unlike ``formula``, intercepts are
        ignored. Only categorical variables are supported.
    absorb_method : `str, optional`
        Method by which fixed effects will be absorbed. For a full list of supported methods, refer to the
        ``residualize_method`` argument of :func:`pyhdfe.create`.
        By default, the simplest methods are used: simple de-meaning for a single fixed effect and simple iterative
        de-meaning by way of the method of alternating projections (MAP) for multiple dimensions of fixed effects. For
        multiple dimensions, non-accelerated MAP is unlikely to be the fastest algorithm. If fixed effect absorption
        seems to be taking a long time, consider using a different method such as ``'lsmr'``, using ``absorb_options``
        to specify a MAP acceleration method, or configuring other options such as termination tolerances.
    absorb_options : `dict, optional`
        Configuration options for the chosen ``method``, which will be passed to the ``options`` argument of
        :func:`pyhdfe.create`.
    """

    # parsed formula state, populated by __init__
    _formula: str
    _absorb: Optional[str]
    _absorb_method: Optional[str]
    _absorb_options: dict
    _terms: List[patsy.desc.Term]
    _absorbed_terms: List[patsy.desc.Term]
    _expressions: List[sp.Expr]
    _absorbed_expressions: List[sp.Expr]
    _names: Set[str]
    _absorbed_names: Set[str]

    def __init__(
            self, formula: str, absorb: Optional[str] = None, absorb_method: Optional[str] = None,
            absorb_options: Optional[Mapping] = None) -> None:
        """Parse the formula into patsy terms and SymPy expressions. In the process, validate it as much as possible
        without any data.
        """
        # validate the formulas
        if not isinstance(formula, str):
            raise TypeError("formula must be a str.")
        if absorb is not None and not isinstance(absorb, str):
            raise TypeError("absorb must be a None or a str.")
        # parse the formulas into patsy terms (the appended "- 1" strips any intercept from the absorbed terms)
        self._formula = formula
        self._absorb = absorb
        self._terms = parse_terms(formula)
        self._absorbed_terms: List[patsy.desc.Term] = []
        if absorb is not None:
            self._absorbed_terms = parse_terms(f'{absorb} - 1')
        # ignore intercepts if there are any absorbed terms and check that there is at least one term
        if self._absorbed_terms:
            self._terms = [t for t in self._terms if t != patsy.desc.INTERCEPT]
            if not self._terms:
                raise patsy.PatsyError("formula has no terms.", patsy.origin.Origin(formula, 0, len(formula)))
        # parse the terms into SymPy expressions and extract variable names
        self._expressions = [parse_term_expression(t) for t in self._terms]
        self._absorbed_expressions = [parse_term_expression(t) for t in self._absorbed_terms]
        self._names = {str(s) for e in self._expressions for s in e.free_symbols}
        self._absorbed_names = {str(s) for e in self._absorbed_expressions for s in e.free_symbols}
        # an expression with no free symbols is a constant term; at most one is allowed, and none when absorbing
        if sum(not e.free_symbols for e in self._expressions) > 1:
            origin = patsy.origin.Origin(formula, 0, len(formula))
            raise patsy.PatsyError("formula should have at most one constant term.", origin)
        if self._absorbed_expressions and any(not e.free_symbols for e in self._absorbed_expressions):
            assert absorb is not None
            origin = patsy.origin.Origin(absorb, 0, len(absorb))
            raise patsy.PatsyError("absorb should not have any constant terms.", origin)
        # validate fixed effect absorption options
        if absorb_method is not None and not isinstance(absorb_method, str):
            raise TypeError("absorb_method must be None or a string.")
        if absorb_options is None:
            absorb_options = {}
        elif not isinstance(absorb_options, dict):
            raise TypeError("absorb_options must be None or a dict.")
        self._absorb_method = absorb_method
        self._absorb_options = absorb_options

    def __reduce__(self) -> Tuple[Type['Formulation'], Tuple]:
        """Handle pickling."""
        return (self.__class__, (self._formula, self._absorb, self._absorb_method, self._absorb_options))

    def __str__(self) -> str:
        """Format the terms as a string."""
        names: List[str] = []
        for term in self._terms:
            names.append('1' if term == patsy.desc.INTERCEPT else term.name())
        for absorbed_term in self._absorbed_terms:
            names.append(f'Absorb[{absorbed_term.name()}]')
        return ' + '.join(names)

    def _build_matrix(self, data: Mapping) -> Tuple[Array, List['ColumnFormulation'], Data]:
        """Convert a mapping from variable names to arrays into the designed matrix, a list of column formulations that
        describe the columns of the matrix, and a mapping from variable names to arrays of data underlying the matrix,
        which include unchanged continuous variables and indicators constructed from categorical variables.
        """
        # normalize the data
        data_mapping: Data = {}
        for name in self._names:
            try:
                data_mapping[name] = np.asarray(data[name]).flatten()
            except Exception as exception:
                origin = patsy.origin.Origin(self._formula, 0, len(self._formula))
                raise patsy.PatsyError(f"Failed to load data for '{name}'.", origin) from exception
        # always have at least one column to represent the size of the data
        if not data_mapping:
            data_mapping = {'': np.zeros(extract_size(data))}
        # design the matrix (adding an intercept term if there are absorbed terms gets Patsy to use reduced coding)
        if self._absorbed_terms:
            matrix_design = design_matrix([patsy.desc.INTERCEPT] + self._terms, data_mapping)
        else:
            matrix_design = design_matrix(self._terms, data_mapping)
        # store matrix column indices and build column formulations for each designed column (ignore the intercept if
        # it was added only to get Patsy to use reduced coding)
        column_indices: List[int] = []
        column_formulations: List[ColumnFormulation] = []
        for term, expression in zip(self._terms, self._expressions):
            if term != patsy.desc.INTERCEPT or not self._absorbed_terms:
                term_slice = matrix_design.term_slices[term]
                for index in range(term_slice.start, term_slice.stop):
                    column_indices.append(index)
                    formula = '1' if term == patsy.desc.INTERCEPT else matrix_design.column_names[index]
                    column_formulations.append(ColumnFormulation(formula, expression))
        # construct a mapping from continuous variable names that appear in at least one column to their arrays
        underlying_data: Data = {}
        for formulation in column_formulations:
            for symbol in formulation.expression.free_symbols:
                # may be None for categorical variables; those entries are replaced with indicators below
                underlying_data[symbol.name] = data_mapping.get(symbol.name)
        # supplement the mapping with indicators constructed from categorical variables
        # NOTE(review): `build_matrix` is not among this file's visible imports -- presumably re-exported from
        # pyblp.configurations.formulation; confirm before refactoring.
        for factor, info in matrix_design.factor_infos.items():
            if info.type == 'categorical':
                indicator_design = design_matrix([patsy.desc.Term([factor])], data_mapping)
                indicator_matrix = build_matrix(indicator_design, data_mapping)
                for name, indicator in zip(indicator_design.column_names, indicator_matrix.T):
                    symbol = CategoricalTreatment.parse_full_symbol(name)
                    if symbol.name in underlying_data:
                        underlying_data[symbol.name] = indicator
        matrix = build_matrix(matrix_design, data_mapping)
        return matrix[:, column_indices], column_formulations, underlying_data

    def _build_ids(self, data: Mapping) -> Array:
        """Convert a mapping from variable names to arrays into the designed matrix of IDs to be absorbed."""
        # normalize the data
        data_mapping: Data = {}
        for name in self._absorbed_names:
            try:
                data_mapping[name] = np.asarray(data[name]).flatten()
            except Exception as exception:
                assert self._absorb is not None
                origin = patsy.origin.Origin(self._absorb, 0, len(self._absorb))
                raise patsy.PatsyError(f"Failed to load data for '{name}'.", origin) from exception
        # build columns of absorbed IDs, interacting factors within each term
        # NOTE(review): `parse_expression` is not among this file's visible imports -- presumably re-exported from
        # pyblp.configurations.formulation; confirm before refactoring.
        ids_columns: List[Array] = []
        for term in self._absorbed_terms:
            factor_columns: List[Array] = []
            term_design = design_matrix([term], data_mapping)
            for factor, info in term_design.factor_infos.items():
                if info.type != 'categorical':
                    raise patsy.PatsyError("Only categorical variables can be absorbed.", factor.origin)
                symbol = parse_expression(factor.name())
                factor_columns.append(data_mapping[symbol.name])
            ids_columns.append(interact_ids(*factor_columns))
        return np.column_stack(ids_columns)

    def _build_absorb(self, ids: Array) -> 'Absorb':
        """Build a function used to absorb fixed effects defined by columns of IDs."""
        # local import: pyhdfe is only needed when fixed effects are actually absorbed
        import pyhdfe
        return Absorb(pyhdfe.create(
            ids, drop_singletons=False, compute_degrees=False, residualize_method=self._absorb_method,
            options=self._absorb_options
        ))
class ModelFormulation(object):
r"""Configuration for designing matrices and absorbing fixed effects.
For each model, the user can specify the downstream and upstream (optional) models, the downstream and upstream
ownership structure, a custom model and markup formula, and vertical integration. The user can also choose to forgo
markup computation and specify their own markups with `user_supplied_markups`. Additionally, there are
specifications related to testing conduct with taxes.
There is a built-in library of models that the researcher can choose from.
Here, we have another difference with PyBLP. In PyBLP, if one wants to build an ownership matrix, there must be a
variable called `firm_id` in the `product_data`. With pyRVtest, the researcher can pass any variable in the
`product_data` as `ownership_downstream` and from this, the ownership matrix in each market will be built.
.. note::
We are working on adding additional models to this library as well as options for the researcher to specify
their own markup function.)
Parameters
----------
model_downstream : `str, optional`
The model of conduct for downstream firms (or if no vertical structure, the model of conduct). One of
"bertrand", "cournot", "monopoly", "perfect_competition", or "other".
model_upstream : `str, optional`
The model of conduct for upstream firms. One of "bertrand", "cournot", "monopoly", "perfect_competition", or
"other".
ownership_downstream: `str, optional`
Column indicating which firm ids to use for ownership matrix construction for downstream firms.
ownership_upstream: `str, optional`
Column indicating which firm ids to use for ownership matrix construction for upstream firms.
custom_model_specification: `dict, optional`
A dictionary containing an optional custom markup formula specified by the user. The specified function must
consist of objects computed within the package.
vertical_integration: `str, optional`
The column name for the data column which indicates the vertical ownership structure.
unit_tax: `str, optional`
The column name for the vector containing information on unit taxes.
advalorem_tax: `str, optional`
The column name for the vector containing information on advalorem taxes.
advalorem_payer: `str, optional`
A string indicating who pays for the advalorem tax in the given model.
cost_scaling: `str, optional`
The column name for the cost scaling parameter.
kappa_specification_downstream: `Union[str, Callable[[Any, Any], float]]], optional`
Information on the degree of cooperation among downstream firms for each market.
kappa_specification_upstream: `Union[str, Callable[[Any, Any], float]]], optional`
Information on the degree of cooperation among upstream firms for each market.
user_supplied_markups: `str, optional`
The name of the column containing user-supplied markups.
"""
_model_downstream: Optional[str]
_model_upstream: Optional[str]
_ownership_downstream: Optional[str]
_ownership_upstream: Optional[str]
_custom_model_specification: Optional[dict]
_vertical_integration: Optional[str]
_unit_tax: Optional[str]
_advalorem_tax: Optional[str]
_advalorem_payer: Optional[str]
_cost_scaling: Optional[str]
_kappa_specification_downstream: Optional[Union[str, Callable[[Any, Any], float]]]
_kappa_specification_upstream: Optional[Union[str, Callable[[Any, Any], float]]]
_user_supplied_markups: Optional[str]
    def __init__(
            self, model_downstream: Optional[str] = None, model_upstream: Optional[str] = None,
            ownership_downstream: Optional[str] = None, ownership_upstream: Optional[str] = None,
            custom_model_specification: Optional[dict] = None, vertical_integration: Optional[str] = None,
            unit_tax: Optional[str] = None, advalorem_tax: Optional[str] = None, advalorem_payer: Optional[str] = None,
            cost_scaling: Optional[str] = None,
            kappa_specification_downstream: Optional[Union[str, Callable[[Any, Any], float]]] = None,
            kappa_specification_upstream: Optional[Union[str, Callable[[Any, Any], float]]] = None,
            user_supplied_markups: Optional[str] = None) -> None:
        """Validate the firm conduct configuration as much as possible without any data and store it.

        Only type and choice-set validation happens here; all data-dependent
        checks are deferred until data are available.

        Raises
        ------
        TypeError
            If a required option is missing, an option has the wrong type, or a
            model/payer name is not one of the recognized choices.
        """
        # validate the parameters
        model_set = {'monopoly', 'cournot', 'bertrand', 'perfect_competition', 'other'}
        if model_downstream is None and user_supplied_markups is None:
            raise TypeError("Either model_downstream or user_supplied_markups must be provided.")
        if model_downstream is not None and model_downstream not in model_set:
            raise TypeError("model_downstream must be monopoly, bertrand, cournot, perfect_competition, or other.")
        if model_upstream is not None and model_upstream not in model_set:
            raise TypeError("model_upstream must be monopoly, bertrand, cournot, perfect_competition, or other.")
        if model_upstream is not None and model_downstream in {'cournot'} and model_upstream in {'cournot'}:
            raise TypeError("model_upstream and model_downstream cannot both be cournot.")
        if ownership_downstream is not None and not isinstance(ownership_downstream, str):
            raise TypeError("ownership_downstream must be a None or a str.")
        if ownership_upstream is not None and not isinstance(ownership_upstream, str):
            raise TypeError("ownership_upstream must be a None or a str.")
        # specifying an upstream model requires upstream ownership information
        if model_upstream is not None and not isinstance(ownership_upstream, str):
            raise TypeError("ownership_upstream must be a str when upstream model defined.")
        if vertical_integration is not None and not isinstance(vertical_integration, str):
            raise TypeError("vertical_integration must be a None or a str.")
        if unit_tax is not None and not isinstance(unit_tax, str):
            raise TypeError("unit_tax must be a None or a str.")
        if advalorem_tax is not None and not isinstance(advalorem_tax, str):
            raise TypeError("advalorem_tax must be a None or a str.")
        if advalorem_payer is not None and advalorem_payer not in {'firm', 'consumer', 'firms', 'consumers'}:
            raise TypeError("advalorem_payer must be a None, firm, or consumer.")
        # an ad valorem tax is meaningless without knowing who pays it
        if advalorem_tax is not None and advalorem_payer is None:
            raise TypeError("advalorem_payer must be defined as firm or consumer when allowing for advalorem taxes.")
        if cost_scaling is not None and not isinstance(cost_scaling, str):
            raise TypeError("cost_scaling must be a None or a str.")
        # store the validated configuration (nothing is parsed here; the options
        # are only looked up later when markups are computed)
        self._model_downstream = model_downstream
        self._model_upstream = model_upstream
        self._ownership_downstream = ownership_downstream
        self._ownership_upstream = ownership_upstream
        self._custom_model_specification = custom_model_specification
        self._vertical_integration = vertical_integration
        self._unit_tax = unit_tax
        self._advalorem_tax = advalorem_tax
        self._advalorem_payer = advalorem_payer
        self._kappa_specification_downstream = kappa_specification_downstream
        self._kappa_specification_upstream = kappa_specification_upstream
        self._cost_scaling = cost_scaling
        self._user_supplied_markups = user_supplied_markups
def __reduce__(self) -> Tuple[Type['Formulation'], Tuple]:
"""Handle pickling."""
return (self.__class__, (
self._model_downstream, self._model_upstream, self._ownership_downstream, self._ownership_upstream,
self._custom_model_specification, self._vertical_integration, self._custom_model_specification,
self._kappa_specification_downstream, self._kappa_specification_upstream, self._user_supplied_markups
))
def __str__(self) -> str:
"""Format the terms as a string."""
names: List[str] = [self._model_downstream, self._model_upstream]
return ' + '.join(names)
def _build_matrix(self, data: Mapping) -> Dict:
"""Convert a mapping from variable names to arrays into the designed matrix, a list of column formulations that
describe the columns of the matrix, and a mapping from variable names to arrays of data underlying the matrix,
which include unchanged continuous variables and indicators constructed from categorical variables.
"""
model_mapping: Dict[Union[str, Array]] = {}
model_mapping.update({
'model_downstream': self._model_downstream,
'model_upstream': self._model_upstream,
'ownership_downstream': self._ownership_downstream,
'ownership_upstream': self._ownership_upstream,
'custom_model_specification': self._custom_model_specification,
'vertical_integration': self._vertical_integration,
'unit_tax': self._unit_tax,
'advalorem_tax': self._advalorem_tax,
'advalorem_payer': self._advalorem_payer,
'cost_scaling': self._cost_scaling,
'kappa_specification_downstream': self._kappa_specification_downstream,
'kappa_specification_upstream': self._kappa_specification_upstream,
'user_supplied_markups': self._user_supplied_markups
})
return model_mapping
def build_matrix(design: patsy.design_info.DesignInfo, data: Mapping) -> Array:
    """Build a matrix according to its design and data mapping variable names to arrays.
    .. note::
       This function is a copy from PyBLP for computational speed.
    """
    # every column must have as many rows as the data arrays
    num_rows = next(iter(data.values())).shape[0]

    # a design without factors can only be a lone intercept term
    if not design.factor_infos:
        return np.ones((num_rows, 1))

    # build the matrix, raising on any null values
    built = patsy.build.build_design_matrices([design], data, NA_action='raise')[0].base

    # a design that used no data may yield a single row that must be tiled to full height
    if built.shape[0] == num_rows:
        return built
    return np.repeat(built[[0]], num_rows, axis=0)
def parse_expression(string: str, mark_categorical: bool = False) -> sp.Expr:
    """Parse a SymPy expression from a string. Optionally, preserve the categorical marker function instead of treating
    it like the identity function.
    .. note::
       This function is a copy from PyBLP for computational speed.
    """
    # list reserved patsy and SymPy names that represent special functions and classes
    patsy_function_names = {'I', 'C'}
    sympy_function_names = {'log', 'exp'}
    sympy_class_names = {'Add', 'Mul', 'Pow', 'Integer', 'Float', 'Symbol'}
    # build a mapping from reserved names to the functions and classes that they represent (patsy functions are dealt
    # with after parsing)
    mapping = {n: sp.Function(n) for n in patsy_function_names}
    mapping.update({n: getattr(sp, n) for n in sympy_function_names | sympy_class_names})
    def transform_tokens(tokens: List[Tuple[int, str]], _: Any, __: Any) -> List[Tuple[int, str]]:
        """Validate a list of tokens and add any unrecognized names as new SymPy symbols."""
        transformed: List[Tuple[int, str]] = []
        symbol_candidate = None
        for code, value in tokens:
            if code not in {token.NAME, token.OP, token.NUMBER, token.NEWLINE, token.ENDMARKER}:
                raise ValueError(f"The token '{value}' is invalid.")
            if code == token.OP and value not in {'+', '-', '*', '/', '**', '(', ')'}:
                raise ValueError(f"The operation '{value}' is invalid.")
            # a symbol immediately followed by '(' would be an (unsupported) function call
            if code == token.OP and value == '(' and symbol_candidate is not None:
                raise ValueError(f"The function '{symbol_candidate}' is invalid.")
            if code != token.NAME or value in set(mapping) - sympy_class_names:
                transformed.append((code, value))
                symbol_candidate = None
                continue
            if value in sympy_class_names | {'Intercept'}:
                raise ValueError(f"The name '{value}' is invalid.")
            # wrap the unrecognized name so the parser sees Symbol('name')
            transformed.extend([(token.NAME, 'Symbol'), (token.OP, '('), (token.NAME, repr(value)), (token.OP, ')')])
            symbol_candidate = value
        return transformed
    # define a function that validates the appearance of categorical marker functions
    def validate_categorical(candidate: sp.Expr, depth: int = 0, categorical: bool = False) -> None:
        """Recursively validate that all categorical marker functions in an expression accept only a single variable
        argument and that they are not arguments to other functions.
        """
        if categorical and depth > 1:
            raise ValueError("The C function must not be an argument to another function.")
        for arg in candidate.args:
            if categorical and not isinstance(arg, sp.Symbol):
                raise ValueError("The C function accepts only a single variable.")
            validate_categorical(arg, depth + 1, candidate.func == mapping['C'])
    # parse the expression, validate it by attempting to represent it as a string, and validate categorical markers
    try:
        expression = sympy.parsing.sympy_parser.parse_expr(string, mapping, [transform_tokens], evaluate=False)
        str(expression)
        validate_categorical(expression)
    except (TypeError, ValueError) as exception:
        raise ValueError(f"The expression '{string}' is malformed.") from exception
    # replace patsy functions with the identity function, unless categorical variables are to be explicitly marked
    for name in patsy_function_names:
        if name != 'C' or not mark_categorical:
            expression = expression.replace(mapping[name], sp.Id)
    return expression
| 26,918 | 53.602434 | 120 | py |
pyRVtest | pyRVtest-main/pyRVtest/configurations/__init__.py | """Configuration classes."""
| 29 | 14 | 28 | py |
pyRVtest | pyRVtest-main/pyRVtest/data/__init__.py | r"""Locations of critical value tables that are used to evaluate whether the instruments being tested are weak for size
or power.
Attributes
----------
F_CRITICAL_VALUES_POWER_RHO : `str`
Location of a CSV file containing critical values for power for each combination of :math:`\rho` and number of
instruments.
F_CRITICAL_VALUES_SIZE_RHO : `str`
Location of a CSV file containing critical values for size for each combination of :math:`\rho` and number of
instruments.
"""
from pathlib import Path
_DATA_PATH = Path(__file__).resolve().parent
F_CRITICAL_VALUES_POWER_RHO = str(_DATA_PATH / 'f_critical_values_power_rho.csv')
F_CRITICAL_VALUES_SIZE_RHO = str(_DATA_PATH / 'f_critical_values_size_rho.csv')
| 727 | 33.666667 | 119 | py |
pyRVtest | pyRVtest-main/docs/conf.py | """Sphinx configuration."""
import ast
import copy
import json
import os
from pathlib import Path
import re
import shutil
from typing import Any, Optional, Tuple
import astunparse
import sphinx.application
# get the location of the source directory
source_path = Path(__file__).resolve().parent

# project information
language = 'en'
project = 'pyRVtest'
copyright = '2023, Marco Duarte, Lorenzo Magnolfi, Mikkel Solvsten, Christopher Sullivan, and Anya Tarascina'
author = 'Marco Duarte, Lorenzo Magnolfi, Mikkel Solvsten, Christopher Sullivan, and Anya Tarascina'

# configure locations of other configuration files
templates_path = ['templates']
exclude_patterns = ['_build', '_downloads', 'notebooks', 'templates', '**.ipynb_checkpoints']

# identify the RTD version that's being built and associated URLs
rtd_version = os.environ.get('READTHEDOCS_VERSION', 'latest')
rtd_url = f'https://{project.lower()}.readthedocs.io/{language}/{rtd_version}'
pdf_url = f'https://readthedocs.org/projects/{project.lower()}/downloads/pdf/{rtd_version}'

# configure extensions
extensions = [
    'sphinx.ext.autosectionlabel',
    'sphinx.ext.autosummary',
    'sphinx.ext.intersphinx',
    'sphinx.ext.extlinks',
    'sphinx.ext.autodoc',
    'sphinx.ext.mathjax',
    'sphinx.ext.napoleon',
    'nbsphinx'
]
# cross-reference targets for external projects' documentation
intersphinx_mapping = {
    'python': ('https://docs.python.org/3.6/', None),
    'numpy': ('https://docs.scipy.org/doc/numpy/', None),
    'patsy': ('https://patsy.readthedocs.io/en/stable/', None),
    'scipy': ('https://docs.scipy.org/doc/scipy/reference/', None),
    'pandas': ('http://pandas.pydata.org/pandas-docs/stable/', None),
    'pyhdfe': ('https://pyhdfe.readthedocs.io/en/stable/', None),
}
# shorthand roles for linking into this project's RTD HTML and PDF builds
extlinks = {
    'rtd': (f'{rtd_url}/%s', None),
    'pdf': (f'{pdf_url}/%s', None)
}
mathjax3_config = {
    'HTML-CSS': {
        'matchFontHeight': False,
        'fonts': ['Latin-Modern', 'TeX']
    }
}
# number all equations, without section prefixes
math_numfig = True
math_number_all = True
numfig_secnum_depth = 0
autosummary_generate = True
numpydoc_show_class_members = False
autosectionlabel_prefix_document = True
# keep building even when a notebook cell errors during execution
nbsphinx_allow_errors = True

# configure HTML information
html_theme = 'sphinx_rtd_theme'
def clean_directories() -> None:
    """Delete previously generated output directories, ignoring missing ones."""
    generated = ('_api', '_downloads', '_notebooks')
    for directory in generated:
        shutil.rmtree(source_path / directory, ignore_errors=True)
def process_notebooks() -> None:
    """Copy notebook files to _notebooks and _downloads, resetting executing counts and replacing domains with Markdown
    equivalents.
    """
    for notebook_path in Path(source_path / 'notebooks').glob('**/*.ipynb'):
        notebook = json.loads(notebook_path.read_text())
        download = copy.deepcopy(notebook)
        # extract parts of the path relative to the notebooks directory and construct the directory's relative location
        relative_parts = notebook_path.relative_to(source_path).parts[1:]
        relative_location = '../' * len(relative_parts)
        # manipulate notebook cells
        for notebook_cell, download_cell in zip(notebook['cells'], download['cells']):
            # reset download execution counts
            for data in [download_cell] + download_cell.get('outputs', []):
                if 'execution_count' in data:
                    data['execution_count'] = 1
            # replace supported Sphinx domains with Markdown equivalents
            if notebook_cell['cell_type'] == 'markdown':
                for source_index, notebook_source in enumerate(notebook_cell['source']):
                    for role, content in re.findall(':([a-z]+):`([^`]+)`', notebook_source):
                        domain = f':{role}:`{content}`'
                        if role == 'ref':
                            # content is "document:Section Title"; slugify the title into an HTML anchor
                            document, text = content.split(':', 1)
                            section = re.sub(r'-+', '-', re.sub('[^0-9a-zA-Z]+', '-', text)).strip('-').lower()
                        elif role in {'mod', 'func', 'class', 'meth', 'attr', 'exc'}:
                            # API roles link into the generated _api pages
                            text = f'`{content}`'
                            section = f'{project}.{content}'
                            document = f'_api/{project}.{content}'
                            if role == 'mod':
                                section = f'module-{section}'
                            elif role == 'attr':
                                # attributes are documented on their parent's page
                                document = document.rsplit('.', 1)[0]
                        else:
                            raise NotImplementedError(f"The domain '{domain}' is not supported.")
                        # replace the domain with Markdown equivalents (reStructuredText doesn't support linked code)
                        # notebook copies link to local .rst files; download copies link to the hosted HTML
                        notebook_cell['source'][source_index] = notebook_cell['source'][source_index].replace(
                            domain,
                            f'[{text.strip("`")}]({relative_location}{document}.rst#{section})'
                        )
                        download_cell['source'][source_index] = download_cell['source'][source_index].replace(
                            domain,
                            f'[{text}]({rtd_url}/{document}.html#{section})'
                        )
        # save the updated notebook files
        for updated, location in [(download, '_downloads'), (notebook, '_notebooks')]:
            updated_path = source_path / Path(location, *relative_parts)
            updated_path.parent.mkdir(parents=True, exist_ok=True)
            updated_path.write_text(json.dumps(updated, indent=1, sort_keys=True, separators=(', ', ': ')))
def process_signature(*args: Any) -> Optional[Tuple[str, str]]:
    """Strip type hints from signatures reported by autodoc.

    Sphinx invokes this with (app, what, name, obj, options, signature,
    return_annotation); only the signature string is used. Returns the
    de-annotated signature and an empty return annotation, or None when there
    is no signature to process.
    """
    signature = args[5]
    if signature is None:
        return None
    assert isinstance(signature, str)
    # parse the signature as a dummy function definition so the AST exposes its arguments
    node = ast.parse(f'def f{signature}: pass').body[0]
    assert isinstance(node, ast.FunctionDef)
    node.returns = None
    # strip annotations from all argument kinds, not just plain positional ones
    # (previously keyword-only, positional-only, *args, and **kwargs kept theirs)
    arguments = node.args
    all_args = list(arguments.args) + list(arguments.kwonlyargs) + list(getattr(arguments, 'posonlyargs', []))
    for extra in (arguments.vararg, arguments.kwarg):
        if extra is not None:
            all_args.append(extra)
    for arg in all_args:
        arg.annotation = None
    # astunparse emits two leading blank lines; drop them along with 'def f' and the trailing colon
    return astunparse.unparse(node).splitlines()[2][5:-1], ''
def setup(app: sphinx.application.Sphinx) -> None:
    """Clean directories, process notebooks, configure extra resources, and strip type hints.

    Sphinx entry point. Cleaning must run before notebook processing because
    process_notebooks repopulates the _downloads and _notebooks directories.
    """
    clean_directories()
    process_notebooks()
    # strip type hints from every signature autodoc renders
    app.connect('autodoc-process-signature', process_signature)
MCSE | MCSE-master/simcse_to_huggingface.py | """
Convert SimCSE's checkpoints to Huggingface style.
code from https://github.com/princeton-nlp/SimCSE
"""
import argparse
import torch
import os
import json
def main():
    """Convert a SimCSE checkpoint folder in place to a plain Huggingface checkpoint.

    Renames the "mlp" projection to "pooler", drops the "bert."/"roberta."
    encoder prefix from parameter names, and rewrites the "ForCL" architecture
    names in config.json so the folder loads as a plain Model.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--path", type=str, help="Path of SimCSE checkpoint folder")
    args = parser.parse_args()

    print("SimCSE checkpoint -> Huggingface checkpoint for {}".format(args.path))

    state_dict = torch.load(os.path.join(args.path, "pytorch_model.bin"), map_location=torch.device("cpu"))
    new_state_dict = {}
    for key, param in state_dict.items():
        if "mlp" in key:
            key = key.replace("mlp", "pooler")
        # Delete "bert" or "roberta" prefix
        if "bert." in key:
            key = key.replace("bert.", "")
        if "roberta." in key:
            key = key.replace("roberta.", "")
        new_state_dict[key] = param
    torch.save(new_state_dict, os.path.join(args.path, "pytorch_model.bin"))

    # Change architectures in config.json; use context managers so the file
    # handles are closed deterministically (the original leaked both handles)
    config_path = os.path.join(args.path, "config.json")
    with open(config_path) as f:
        config = json.load(f)
    config["architectures"] = [name.replace("ForCL", "Model") for name in config["architectures"]]
    with open(config_path, "w") as f:
        json.dump(config, f, indent=2)


if __name__ == "__main__":
    main()
| 1,340 | 29.477273 | 107 | py |
MCSE | MCSE-master/src/utils.py | import sys
import torch
# Set path to SentEval
PATH_TO_SENTEVAL = './SentEval'
PATH_TO_DATA = './SentEval/data'
# Import SentEval
sys.path.insert(0, PATH_TO_SENTEVAL)
import senteval
def evaluate(model, tokenizer):
    """Evaluate unpooled [CLS] sentence embeddings on the STS-Benchmark dev set.

    Runs SentEval in fast mode and returns a dict with the dev Spearman
    correlation plus alignment and uniformity losses.
    """
    # run batches on whatever device the model already lives on instead of
    # hard-coding 'cuda', so CPU-only evaluation also works
    device = next(model.parameters()).device

    def prepare(params, samples):
        return

    def batcher(params, batch):
        sentences = [' '.join(s) for s in batch]
        batch = tokenizer.batch_encode_plus(
            sentences,
            return_tensors='pt',
            padding=True,
        )
        for k in batch:
            batch[k] = batch[k].to(device)
        with torch.no_grad():
            outputs = model(**batch, output_hidden_states=True, return_dict=True)
        return outputs.last_hidden_state[:, 0].cpu()  # unpooled [CLS] output in BERT

    # Set params for SentEval (fastmode)
    params = {'task_path': PATH_TO_DATA, 'usepytorch': True, 'kfold': 5}
    params['classifier'] = {'nhid': 0, 'optim': 'rmsprop', 'batch_size': 128,
                            'tenacity': 3, 'epoch_size': 2}

    se = senteval.engine.SE(params, batcher, prepare)
    tasks = ['STSBenchmark']
    results = se.eval(tasks)

    stsb_spearman = results['STSBenchmark']['dev']['spearman'][0]
    stsb_align = results['STSBenchmark']['dev']['align_loss']
    stsb_uniform = results['STSBenchmark']['dev']['uniform_loss']

    metrics = {"eval_stsb_spearman": stsb_spearman,
               "eval_stsb_align": stsb_align,
               "eval_stsb_uniform": stsb_uniform}
    return metrics
def inf_train_gen(trainloader):
    """Yield batches from `trainloader` forever, restarting it after each pass."""
    while True:
        yield from trainloader
MCSE | MCSE-master/src/model.py | import torch
import torch.nn as nn
from transformers.models.bert.modeling_bert import BertPreTrainedModel, BertModel, BertLMPredictionHead
from transformers.models.roberta.modeling_roberta import RobertaPreTrainedModel, RobertaModel, RobertaLMHead
from transformers.modeling_outputs import SequenceClassifierOutput, BaseModelOutputWithPoolingAndCrossAttentions
class MLPLayer(nn.Module):
    """
    Head for getting sentence representations over RoBERTa/BERT's CLS representation.
    """

    def __init__(self, in_dim, out_dim):
        super().__init__()
        self.dense = nn.Linear(in_dim, out_dim)
        self.activation = nn.Tanh()

    def forward(self, features, **kwargs):
        # linear projection followed by a tanh non-linearity
        projected = self.dense(features)
        return self.activation(projected)
class Similarity(nn.Module):
    """
    Dot product or cosine similarity
    """

    def __init__(self, temp):
        super().__init__()
        self.temp = temp
        self.cos = nn.CosineSimilarity(dim=-1)

    def forward(self, x, y):
        # cosine similarity sharpened by the temperature
        similarity = self.cos(x, y)
        return similarity / self.temp
class BertForCL(BertPreTrainedModel):
    """BERT encoder for contrastive learning.

    Wraps `BertModel` plus an extra MLP head applied to the final-layer [CLS]
    token. Inputs are expected as (bs, num_sent, len) and are flattened to
    (bs * num_sent, len) before encoding.
    """
    _keys_to_ignore_on_load_missing = [r"position_ids"]
    def __init__(self, config, *model_args, **model_kargs):
        super().__init__(config)
        self.args = model_kargs['model_args']
        self.bert = BertModel(config)
        # pooler maps hidden_size -> hidden_size over the [CLS] representation
        self.pooler = MLPLayer(config.hidden_size, config.hidden_size)
        self.init_weights()
    def forward(self,
                input_ids=None,
                attention_mask=None,
                token_type_ids=None,
                position_ids=None,
                head_mask=None,
                inputs_embeds=None,
                labels=None,
                output_attentions=None,
                output_hidden_states=None,
                return_dict=True,
                ):
        """Encode flattened sentences; return pooled [CLS] output, last hidden
        state, and all hidden states.
        """
        # Flatten input for encoding
        input_ids = input_ids.view((-1, input_ids.size(-1)))  # (bs * num_sent, len)
        attention_mask = attention_mask.view((-1, attention_mask.size(-1)))  # (bs * num_sent len)
        if token_type_ids is not None:
            token_type_ids = token_type_ids.view((-1, token_type_ids.size(-1)))  # (bs * num_sent, len)
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=True,
            return_dict=True,
        )
        # MLP head on the [CLS] token of the last layer
        pooler_output = self.pooler(outputs.last_hidden_state[:, 0])
        return BaseModelOutputWithPoolingAndCrossAttentions(
            pooler_output=pooler_output,
            last_hidden_state=outputs.last_hidden_state,
            hidden_states=outputs.hidden_states,
        )
class RobertaForCL(RobertaPreTrainedModel):
    """RoBERTa encoder for contrastive learning.

    Wraps `RobertaModel` plus an extra MLP head applied to the final-layer
    [CLS]/<s> token. Inputs are expected as (bs, num_sent, len) and are
    flattened to (bs * num_sent, len) before encoding.
    """
    _keys_to_ignore_on_load_missing = [r"position_ids"]
    def __init__(self, config, *model_args, **model_kargs):
        super().__init__(config)
        self.args = model_kargs['model_args']
        self.roberta = RobertaModel(config)
        # pooler maps hidden_size -> hidden_size over the [CLS] representation
        self.pooler = MLPLayer(config.hidden_size, config.hidden_size)
        self.init_weights()
    def forward(self,
                input_ids=None,
                attention_mask=None,
                token_type_ids=None,
                position_ids=None,
                head_mask=None,
                inputs_embeds=None,
                labels=None,
                output_attentions=None,
                output_hidden_states=None,
                return_dict=True,
                ):
        """Encode flattened sentences; return pooled [CLS] output, last hidden
        state, and all hidden states.
        """
        # Flatten input for encoding
        input_ids = input_ids.view((-1, input_ids.size(-1)))  # (bs * num_sent, len)
        attention_mask = attention_mask.view((-1, attention_mask.size(-1)))  # (bs * num_sent len)
        if token_type_ids is not None:
            token_type_ids = token_type_ids.view((-1, token_type_ids.size(-1)))  # (bs * num_sent, len)
        outputs = self.roberta(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=True,
            return_dict=True,
        )
        # MLP head on the [CLS] token of the last layer
        pooler_output = self.pooler(outputs.last_hidden_state[:, 0])
        return BaseModelOutputWithPoolingAndCrossAttentions(
            pooler_output=pooler_output,
            last_hidden_state=outputs.last_hidden_state,
            hidden_states=outputs.hidden_states,
        )
class ResNetVisnModel(nn.Module):
    """Project pre-extracted visual features into the grounding space and L2-normalize them."""

    def __init__(self, feature_dim, proj_dim):
        super().__init__()
        self.mlp = MLPLayer(feature_dim, proj_dim)  # visual features -> grounding space

    def forward(self, x):
        projected = self.mlp(x)
        norm = projected.norm(2, dim=-1, keepdim=True)
        return projected / norm
class MCSE(nn.Module):
    """Multimodal contrastive sentence embedding model.

    Combines a language encoder (SimCSE-style, with two dropout-augmented
    views per sentence) and a visual feature encoder; sentence representations
    are additionally grounded to image features in a shared projection space.
    """

    def __init__(self, lang_model, visn_model, args):
        super().__init__()
        self.args = args
        self.lang_model = lang_model
        self.visn_model = visn_model
        self.grounding = MLPLayer(args.hidden_size, args.proj_dim)  # sent embeddings -> grounding space
        self.sim = Similarity(temp=self.args.temp)
        self.sim_vl = Similarity(temp=self.args.temp_vl)
        self.loss_fct = nn.CrossEntropyLoss()

    def forward(self, batch):
        """Encode a batch; return ([CLS] states, pooled projections), each (bs, num_sent, hidden)."""
        # Pass token_type_ids/position_ids only when they are actually present.
        # The original keyed BOTH checks on 'position_ids', silently dropping
        # token_type_ids whenever position_ids was absent from the batch.
        lang_output = self.lang_model(
            input_ids=batch['input_ids'],
            attention_mask=batch['attention_mask'],
            token_type_ids=batch['token_type_ids'] if 'token_type_ids' in batch else None,
            position_ids=batch['position_ids'] if 'position_ids' in batch else None)
        batch_size = batch['input_ids'].size(0)
        num_sent = batch['input_ids'].size(1)
        # [bs*2, hidden] -> [bs, 2, hidden]
        lang_pooled_output = lang_output.last_hidden_state[:, 0].view((batch_size, num_sent, -1))
        lang_projection = lang_output.pooler_output.view((batch_size, num_sent, -1))  # output of additional MLP layer
        return lang_pooled_output, lang_projection

    def compute_loss(self, batch, cal_inter=False):
        """Compute the SimCSE contrastive loss; with `cal_inter`, also return the grounding loss."""
        l_pool, l_proj = self.forward(batch)
        # Separate the two dropout-augmented views of each sentence
        z1, z2 = l_proj[:, 0], l_proj[:, 1]  # (bs, hidden)
        cos_sim = self.sim(z1.unsqueeze(1), z2.unsqueeze(0))  # (bs, bs)
        # diagonal entries are the positive pairs; the rest are in-batch negatives
        labels = torch.arange(cos_sim.size(0)).long().to(self.args.device)  # (bs)
        loss = self.loss_fct(cos_sim, labels)  # unsup: bs-1 negatives
        if not cal_inter:
            return loss
        v = self.visn_model(batch['img'])  # [bs, proj_dim]
        l2v_proj = self.grounding(l_pool)  # [bs, 2, proj_dim], output for vision grounding
        l2v_proj = l2v_proj / l2v_proj.norm(2, dim=-1, keepdim=True)
        p1, p2 = l2v_proj[:, 0], l2v_proj[:, 1]  # (bs, proj)
        cos_sim_p0 = self.sim_vl(p1.unsqueeze(1), v.unsqueeze(0))  # (bs, bs)
        cos_sim_p1 = self.sim_vl(p2.unsqueeze(1), v.unsqueeze(0))
        # average the grounding loss over both sentence views
        inter_loss = (self.loss_fct(cos_sim_p0, labels) + self.loss_fct(cos_sim_p1, labels)) / 2
        return loss, inter_loss
| 7,235 | 35.730964 | 137 | py |
MCSE | MCSE-master/src/data.py | import torch
from torch.utils.data import Dataset
import h5py
import numpy as np
from torchvision.datasets.folder import default_loader
class ImgSentDataset(Dataset):
    """Dataset pairing training sentences with (optionally) pre-extracted image features.

    When `feature_file` is None the dataset is text-only. `shuffle_imgs` and
    `random_imgs` support ablation studies that break the sentence-image
    alignment; `shot` > 0 restricts the dataset to a random subset of that size.
    """

    def __init__(self,
                 text_file,
                 feature_file=None,
                 shuffle_imgs=False,
                 random_imgs=False,
                 shot=-1):
        # text_file: one sentence per line; feature_file: HDF5 file whose
        # 'features' dataset is aligned row-for-row with text_file
        self.text_file = text_file
        self.feature_file = feature_file
        self.shuffle_imgs = shuffle_imgs
        self.random_imgs = random_imgs
        self.shot = shot
        self.raw_dataset = self.load_data()

    def load_data(self):
        """Load sentences (and aligned image features, if any) as a list of dicts."""
        data = []
        sentonly = True if self.feature_file is None else False
        # loading sentences
        with open(self.text_file, 'r') as f:
            sentences = [l.strip() for l in f.readlines()]
        N = len(sentences)
        # loading image features
        if not sentonly:
            with h5py.File(self.feature_file, "r") as f:
                imgs = torch.from_numpy(np.array(f['features']))
            if self.shuffle_imgs:
                # ablation: permute features so sentences get mismatched images
                print('Ablation study: shuffling the imgs ')
                index = np.random.choice(N, N, replace=False)
                imgs = imgs[index]
            if self.random_imgs:
                # ablation: sample features with replacement (images may repeat)
                print('Ablation study: select random imgs ')
                index = np.random.choice(N, N, replace=True)
                imgs = imgs[index]
            for sent, img in zip(sentences, imgs):
                d = {'sent': sent, 'img': img}
                data.append(d)
        else:
            for sent in sentences:
                d = {'sent': sent}
                data.append(d)
        if self.shot > 0:
            # few-shot setting: keep a random subset of `shot` examples
            index = np.random.choice(N, self.shot, replace=False)
            data = np.array(data)[index].tolist()
        return data

    def __len__(self):
        return len(self.raw_dataset)

    def __getitem__(self, item:int):
        datum = self.raw_dataset[item]
        return datum
| 1,990 | 25.905405 | 65 | py |
MCSE | MCSE-master/src/evaluation.py | import sys
import os
import logging
import argparse
from prettytable import PrettyTable
import torch
from transformers import AutoModel, AutoTokenizer
# Set PATHs
PATH_TO_SENTEVAL = './SentEval'
PATH_TO_DATA = './SentEval/data'
# Import SentEval
sys.path.insert(0, PATH_TO_SENTEVAL)
import senteval
def print_full_table(task_names, scores, aligns, uniforms, logging):
    """Render scores plus alignment/uniformity rows as a table; echo to stdout and the log."""
    table = PrettyTable()
    table.field_names = task_names
    for row in (scores, aligns, uniforms):
        table.add_row(row)
    print(table)
    logging.info(table)
def print_table(task_names, scores, logging):
    """Render a single score row as a table; echo to stdout and the log."""
    table = PrettyTable()
    table.field_names = task_names
    table.add_row(scores)
    print(table)
    logging.info(table)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--model_name_or_path", type=str,
help="Transformers' model name or path")
parser.add_argument("--pooler", type=str,
choices=['cls', 'cls_before_pooler', 'avg', 'avg_top2', 'avg_first_last'],
default='cls',
help="Which pooler to use")
parser.add_argument("--mode", type=str,
choices=['dev', 'test', 'fasttest'],
default='test',
help="What evaluation mode to use (dev: fast mode, dev results; test: full mode, test results); fasttest: fast mode, test results")
parser.add_argument("--task_set", type=str,
choices=['sts', 'transfer', 'full', 'na'],
default='sts',
help="What set of tasks to evaluate on. If not 'na', this will override '--tasks'")
parser.add_argument("--tasks", type=str, nargs='+',
default=['STS12', 'STS13', 'STS14', 'STS15', 'STS16',
'MR', 'CR', 'MPQA', 'SUBJ', 'SST2', 'TREC', 'MRPC',
'SICKRelatedness', 'STSBenchmark'],
help="Tasks to evaluate on. If '--task_set' is specified, this will be overridden")
args = parser.parse_args()
# Set up logger
logfile = os.path.join(args.model_name_or_path, 'eval_results.log')
logging.basicConfig(format='%(asctime)s : %(message)s', level=logging.DEBUG, filename=logfile)
# Load transformers' model checkpoint
model = AutoModel.from_pretrained(args.model_name_or_path)
tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = model.to(device)
# Set up the tasks
if args.task_set == 'sts':
args.tasks = ['STS12', 'STS13', 'STS14', 'STS15', 'STS16', 'STSBenchmark', 'SICKRelatedness']
elif args.task_set == 'transfer':
args.tasks = ['MR', 'CR', 'MPQA', 'SUBJ', 'SST2', 'TREC', 'MRPC']
elif args.task_set == 'full':
args.tasks = ['STS12', 'STS13', 'STS14', 'STS15', 'STS16', 'STSBenchmark', 'SICKRelatedness']
args.tasks += ['MR', 'CR', 'MPQA', 'SUBJ', 'SST2', 'TREC', 'MRPC']
# Set params for SentEval
if args.mode == 'dev' or args.mode == 'fasttest':
# Fast mode
params = {'task_path': PATH_TO_DATA, 'usepytorch': True, 'kfold': 5}
params['classifier'] = {'nhid': 0, 'optim': 'rmsprop', 'batch_size': 128,
'tenacity': 3, 'epoch_size': 2}
elif args.mode == 'test':
# Full mode
params = {'task_path': PATH_TO_DATA, 'usepytorch': True, 'kfold': 10}
params['classifier'] = {'nhid': 0, 'optim': 'adam', 'batch_size': 64,
'tenacity': 5, 'epoch_size': 4}
else:
raise NotImplementedError
# SentEval prepare and batcher
def prepare(params, samples):
return
def batcher(params, batch, max_length=None):
# Handle rare token encoding issues in the dataset
if len(batch) >= 1 and len(batch[0]) >= 1 and isinstance(batch[0][0], bytes):
batch = [[word.decode('utf-8') for word in s] for s in batch]
sentences = [' '.join(s) for s in batch]
# Tokenization
if max_length is not None:
batch = tokenizer.batch_encode_plus(
sentences,
return_tensors='pt',
padding=True,
max_length=max_length,
truncation=True
)
else:
batch = tokenizer.batch_encode_plus(
sentences,
return_tensors='pt',
padding=True,
)
# Move to the correct device
for k in batch:
batch[k] = batch[k].to(device)
# Get raw embeddings
with torch.no_grad():
outputs = model(**batch, output_hidden_states=True, return_dict=True)
last_hidden = outputs.last_hidden_state
pooler_output = outputs.pooler_output
hidden_states = outputs.hidden_states
# Apply different poolers
if args.pooler == 'cls':
# There is a linear+activation layer after CLS representation
return pooler_output.cpu()
elif args.pooler == 'cls_before_pooler':
return last_hidden[:, 0].cpu()
elif args.pooler == "avg":
return ((last_hidden * batch['attention_mask'].unsqueeze(-1)).sum(1) / batch['attention_mask'].sum(
-1).unsqueeze(-1)).cpu()
elif args.pooler == "avg_first_last":
first_hidden = hidden_states[0]
last_hidden = hidden_states[-1]
pooled_result = ((first_hidden + last_hidden) / 2.0 * batch['attention_mask'].unsqueeze(-1)).sum(1) / batch[
'attention_mask'].sum(-1).unsqueeze(-1)
return pooled_result.cpu()
elif args.pooler == "avg_top2":
second_last_hidden = hidden_states[-2]
last_hidden = hidden_states[-1]
pooled_result = ((last_hidden + second_last_hidden) / 2.0 * batch['attention_mask'].unsqueeze(-1)).sum(1) / \
batch['attention_mask'].sum(-1).unsqueeze(-1)
return pooled_result.cpu()
else:
raise NotImplementedError
results = {}
for task in args.tasks:
se = senteval.engine.SE(params, batcher, prepare)
result = se.eval(task)
results[task] = result
# Print evaluation results
if args.mode == 'dev':
print("------ %s ------" % (args.mode))
logging.info("------ %s ------" % (args.mode))
task_names = []
scores = []
for task in ['STSBenchmark', 'SICKRelatedness']:
task_names.append(task)
if task in results:
scores.append("%.2f" % (results[task]['dev']['spearman'][0] * 100))
else:
scores.append("0.00")
print_table(task_names, scores, logging)
task_names = []
scores = []
for task in ['MR', 'CR', 'SUBJ', 'MPQA', 'SST2', 'TREC', 'MRPC']:
task_names.append(task)
if task in results:
scores.append("%.2f" % (results[task]['devacc']))
else:
scores.append("0.00")
task_names.append("Avg.")
scores.append("%.2f" % (sum([float(score) for score in scores]) / len(scores)))
print_table(task_names, scores, logging)
elif args.mode == 'test' or args.mode == 'fasttest':
print("------ %s ------" % (args.mode))
logging.info("------ %s ------" % (args.mode))
task_names = []
scores = []
aligns = []
uniforms = []
for task in ['STS12', 'STS13', 'STS14', 'STS15', 'STS16', 'STSBenchmark', 'SICKRelatedness']:
task_names.append(task)
if task in results:
if task in ['STS12', 'STS13', 'STS14', 'STS15', 'STS16']:
scores.append("%.2f" % (results[task]['all']['spearman']['all'] * 100))
aligns.append("%.3f" % (results[task]['all']['align_loss']['all']))
uniforms.append("%.3f" % (results[task]['all']['uniform_loss']['all']))
else:
scores.append("%.2f" % (results[task]['test']['spearman'].correlation * 100)) # for STSB and SICK.
aligns.append("%.3f" % (results[task]['test']['align_loss']))
uniforms.append("%.3f" % (results[task]['test']['uniform_loss']))
else:
scores.append("0.00")
task_names.append("Avg.")
scores.append("%.2f" % (sum([float(score) for score in scores]) / len(scores)))
aligns.append("%.3f" % (sum([float(score) for score in aligns]) / len(aligns)))
uniforms.append("%.3f" % (sum([float(score) for score in uniforms]) / len(uniforms)))
# print_table(task_names, scores, logging)
print_full_table(task_names, scores, aligns, uniforms, logging)
#task_names = []
#scores = []
#for task in ['MR', 'CR', 'SUBJ', 'MPQA', 'SST2', 'TREC', 'MRPC']:
# task_names.append(task)
# if task in results:
# scores.append("%.2f" % (results[task]['devacc']))
# else:
# scores.append("0.00")
#task_names.append("Avg.")
#scores.append("%.2f" % (sum([float(score) for score in scores]) / len(scores)))
#print_table(task_names, scores, logging)
# Script entry point: run the evaluation only when executed directly,
# not when this module is imported.
if __name__ == "__main__":
    main()
| 9,443 | 39.706897 | 155 | py |
MCSE | MCSE-master/src/train.py | import argparse
import logging
import math
import os
import random
import datasets
from torch.utils.data.dataloader import DataLoader
import torch
from tqdm.auto import tqdm
import transformers
from accelerate import Accelerator
from transformers import (
AdamW,
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
PretrainedConfig,
SchedulerType,
default_data_collator,
get_scheduler,
set_seed,
)
from data import ImgSentDataset
from model import MCSE, BertForCL, RobertaForCL, ResNetVisnModel
from utils import evaluate
logger = logging.getLogger(__name__)
def parse_args():
    """Parse command-line hyper-parameters for single-corpus SimCSE/MCSE training.

    Returns:
        argparse.Namespace: all training hyper-parameters.

    Side effect: creates ``args.output_dir`` if it does not exist yet, so
    later checkpoint saves cannot fail on a missing directory.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--framework", type=str, default="simcse",
        choices=["simcse", "mcse"],
        help="The framework to use.")
    parser.add_argument(
        "--model_name_or_path", type=str, default="bert-base-uncased",
        help="Path to pretrained model or model identifier from huggingface.co/models.")
    parser.add_argument(
        "--text_file", type=str, default=None,
        help="A .txt file containing the training sentences.")
    parser.add_argument(
        "--feature_file", type=str, default=None,
        help="A .hdf5 file containing the image features (e.g. ResNet50 features).")
    parser.add_argument(
        "--shuffle_imgs", action="store_true",
        help="Ablation study for random imgs")
    parser.add_argument(
        "--random_imgs", action="store_true",
        help="Ablation study for random imgs")
    parser.add_argument(
        "--output_dir", type=str, default="result/",
        help="Where to store the final model.")
    parser.add_argument(
        "--max_seq_length", type=int, default=32,
        help=(
            "The maximum total input sequence length after tokenization. Sequences longer than this will be truncated,"
            # fixed typo in the original help text: `--pad_to_max_lengh`
            " sequences shorter will be padded if `--pad_to_max_length` is passed."
        ),
    )
    parser.add_argument(
        "--pad_to_max_length", action="store_true",
        help="If passed, pad all samples to `max_length`. Otherwise, dynamic padding is used.")
    parser.add_argument(
        "--per_device_train_batch_size", type=int, default=64,
        help="Batch size (per device) for the training dataloader.")
    parser.add_argument(
        "--per_device_eval_batch_size", type=int, default=64,
        help="Batch size (per device) for the evaluation dataloader.")
    parser.add_argument(
        "--learning_rate", type=float, default=3e-5,
        help="Initial learning rate (after the potential warmup period) to use.")
    parser.add_argument(
        "--weight_decay", type=float, default=0.0,
        help="Weight decay to use.")
    parser.add_argument(
        "--num_train_epochs", type=int, default=3,
        help="Total number of training epochs to perform.")
    parser.add_argument(
        "--max_train_steps", type=int, default=None,
        help="Total number of training steps to perform. If provided, overrides num_train_epochs.")
    parser.add_argument(
        "--max_grad_norm", type=float, default=1.0,
        help="Maximum gradient norm for gradient clipping.")
    # SchedulerType is a str-based enum, so the converted value still compares
    # equal to the string choices below.
    parser.add_argument(
        "--lr_scheduler_type", type=SchedulerType, default="linear",
        choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"],
        help="The scheduler type to use.")
    parser.add_argument(
        "--num_warmup_steps", type=int, default=0,
        help="Number of steps for the warmup in the lr scheduler.")
    parser.add_argument(
        "--seed", type=int, default=0,
        help="A seed for reproducible training. We used [0,1,2,3,4] in experiments.")
    parser.add_argument(
        "--temp", type=float, default=0.05,
        help="Temperature for softmax.")
    parser.add_argument(
        "--temp_vl", type=float, default=0.05,
        help="Temperature for cross-modality contrastive learning")
    parser.add_argument(
        "--hidden_size", type=int, default=768,
        # fixed typo in the original help text: "dimention"
        help="Text embedding dimension of pooled output (mlp)")
    parser.add_argument(
        "--proj_dim", type=int, default=256,
        help="Projection dimension in grounding space")
    parser.add_argument(
        "--lbd", type=float, default=0.01,
        help="weight for inter-modality loss")
    parser.add_argument(
        "--eval_steps", type=int, default=125,
        help="evaluation step interval")
    parser.add_argument(
        "--metric_for_best_model", type=str, default='stsb_spearman',
        help="for saving best checkpoint")
    parser.add_argument(
        "--gradient_accumulation_steps", type=int, default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.")
    parser.add_argument(
        "--shot", type=int, default=-1,
        help="few-shot setting")
    args = parser.parse_args()

    # Create the output directory up front so later saves cannot fail on it.
    if args.output_dir is not None:
        os.makedirs(args.output_dir, exist_ok=True)
    return args
def main():
    """Train SimCSE/MCSE on a single corpus and keep the best checkpoint.

    Pipeline: parse args -> build tokenizer/config/model -> contrastive
    training with optional inter-modality loss (``--framework mcse``),
    evaluating every ``--eval_steps`` optimizer steps; the checkpoint with
    the best ``eval_<metric_for_best_model>`` is saved to ``args.output_dir``
    and the last state to ``<output_dir>/final_checkpoint``.
    """
    args = parse_args()
    print(args)
    # Initialize the accelerator.
    accelerator = Accelerator()
    args.device = accelerator.device
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(accelerator.state)
    # Setup logging, we only want one process per machine to log things on the screen.
    # accelerator.is_local_main_process is only True for one process per machine.
    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # If passed along, set the training seed now.
    set_seed(args.seed)
    # Load pretrained tokenizer
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, use_fast=True)
    # Persist tokenizer + run configuration next to the checkpoints.
    tokenizer.save_pretrained(args.output_dir)
    torch.save(args, os.path.join(args.output_dir, "training_args.bin"))
    config = AutoConfig.from_pretrained(args.model_name_or_path)
    # Pick the contrastive-learning backbone by substring match on the
    # checkpoint name; note 'roberta' must be tested first since the name
    # also contains 'bert'.
    if 'roberta' in args.model_name_or_path:
        lang_model = RobertaForCL.from_pretrained(
            args.model_name_or_path,
            from_tf=bool(".ckpt" in args.model_name_or_path),
            config=config,
            model_args=args
        )
    elif 'bert' in args.model_name_or_path:
        lang_model = BertForCL.from_pretrained(
            args.model_name_or_path,
            from_tf=bool(".ckpt" in args.model_name_or_path),
            config=config,
            model_args=args
        )
    else:
        raise NotImplementedError
    if args.framework.lower() == 'mcse':
        # 2048 matches the ResNet50 feature dimension mentioned in the
        # --feature_file help text.
        visn_model = ResNetVisnModel(2048, args.proj_dim)
    else:
        visn_model = None
    model = MCSE(lang_model, visn_model, args)
    # Define collator function
    def data_collator(batch):
        # batch: list of dicts with key 'sent' and (for paired data) 'img'.
        keys = batch[0].keys()
        sentences = [b['sent'] for b in batch]
        new_batch = {}
        total = len(sentences)
        # tokenization
        tokenized_sents = tokenizer(
            sentences,
            max_length=args.max_seq_length,
            truncation=True,
            padding="max_length" if args.pad_to_max_length else 'longest',
            return_tensors='pt'
        )
        if 'img' in keys:
            new_batch['img'] = torch.stack([batch[i]['img'] for i in range(total)])
        for key in ['input_ids', 'attention_mask', 'token_type_ids', 'position_ids']:
            # (bs, len) -> (bs, 2, len)
            # Each sentence is duplicated so the model sees two dropout views
            # of the same input (SimCSE-style positive pair).
            if key in tokenized_sents.keys():
                new_batch[key] = tokenized_sents[key].unsqueeze(1).repeat(1, 2, 1)
        return new_batch
    # dataset and dataloader
    train_dataset = ImgSentDataset(text_file=args.text_file,
                                   feature_file=args.feature_file,
                                   shuffle_imgs=args.shuffle_imgs,
                                   random_imgs=args.random_imgs,
                                   shot=args.shot)
    train_dataloader = DataLoader(train_dataset,
                                  shuffle=True,
                                  batch_size=args.per_device_train_batch_size,
                                  collate_fn=data_collator)
    # Log a few random samples from the training set:
    for index in random.sample(range(len(train_dataset)), 3):
        logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
    # Optimizer
    # Split weights in two groups, one with weight decay and the other not. Same as examples in huggingface and sentence-transformer.
    no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
    optimizer_grouped_parameters = [
        {
            "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
            "weight_decay": args.weight_decay,
        },
        {
            "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
            "weight_decay": 0.0,
        },
    ]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate)
    # Prepare everything with our `accelerator`.
    model, optimizer, train_dataloader = accelerator.prepare(model, optimizer, train_dataloader)
    # Scheduler and math around the number of training steps.
    num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
    #num_update_steps_per_epoch = math.ceil(num_update_steps_per_epoch * args.percent)
    if args.max_train_steps is None:
        args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
    else:
        # Explicit step budget wins; recompute epochs to cover it.
        args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
    lr_scheduler = get_scheduler(
        name=args.lr_scheduler_type,
        optimizer=optimizer,
        num_warmup_steps=args.num_warmup_steps,
        num_training_steps=args.max_train_steps,
    )
    set_seed(args.seed)  # for sake of the status change of sampler
    # Train! num_processes -> 1
    total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
    logger.info("***** Running training *****")
    logger.info(f" Num examples = {len(train_dataset)}")
    logger.info(f" Num Epochs = {args.num_train_epochs}")
    logger.info(f" Instantaneous batch size per device = {args.per_device_train_batch_size}")
    logger.info(f" Train train batch size (w.parallel, distributed & accumulation) = {total_batch_size}")
    logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
    logger.info(f" Total optimization steps = {args.max_train_steps}")
    # Only show the progress bar once on each machine.
    progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process)
    completed_steps = 0
    best_metric = 0
    for epoch in range(args.num_train_epochs):
        for step, batch in enumerate(train_dataloader):
            model.train()
            if args.framework.lower() == 'mcse':
                # MCSE: intra-modality (text) loss plus weighted inter-modality loss.
                intra_loss, inter_loss = model.compute_loss(batch, cal_inter=True)
                loss = intra_loss + args.lbd * inter_loss
            else:
                loss = model.compute_loss(batch, cal_inter=False)
            # Scale so gradients accumulated over several micro-batches average out.
            loss = loss / args.gradient_accumulation_steps
            accelerator.backward(loss)
            if (step+1) % args.gradient_accumulation_steps == 0 or step == len(train_dataloader)-1:
                torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
                progress_bar.update(1)
                completed_steps += 1
            if (step+1) % args.gradient_accumulation_steps == 0 and (completed_steps % args.eval_steps == 0 or completed_steps >= args.max_train_steps):
                logger.info("***** Start evaluation *****")
                model.eval()
                metrics = evaluate(model.lang_model, tokenizer)
                logger.info(f" step {completed_steps}: eval_stsb_spearman = {metrics['eval_stsb_spearman']}")
                if metrics['eval_'+args.metric_for_best_model] > best_metric:
                    # evaluation
                    best_metric = metrics['eval_'+args.metric_for_best_model]
                    # save (1) pytorch_model.bin (2) config.json
                    logger.info("Saving best model checkpoint to %s", args.output_dir)
                    accelerator.wait_for_everyone() # wait for all processes to reach that point in the script
                    unwrapped_model = accelerator.unwrap_model(model)
                    unwrapped_model.lang_model.save_pretrained(args.output_dir, save_function=accelerator.save)
                    if args.framework.lower() == 'mcse':
                        # Vision tower + grounding head are saved separately;
                        # save_pretrained only covers the language model.
                        accelerator.save(
                            {
                                'visn_model': unwrapped_model.visn_model.state_dict(),
                                'grounding': unwrapped_model.grounding.state_dict()
                            },
                            os.path.join(args.output_dir, 'mcse.pt')
                        )
            if completed_steps >= args.max_train_steps:
                path = os.path.join(args.output_dir, 'final_checkpoint')
                logger.info("Saving final checkpoint to %s", path)
                tokenizer.save_pretrained(path)
                accelerator.wait_for_everyone() # wait for all processes to reach that point in the script
                unwrapped_model = accelerator.unwrap_model(model)
                unwrapped_model.lang_model.save_pretrained(path, save_function=accelerator.save)
                if args.framework.lower() == 'mcse':
                    accelerator.save(
                        {
                            'visn_model': unwrapped_model.visn_model.state_dict(),
                            'grounding': unwrapped_model.grounding.state_dict()
                        },
                        os.path.join(path, 'mcse.pt')
                    )
                # NOTE(review): this break only exits the inner (batch) loop.
                # If any epochs remain, each one re-enters here, runs one more
                # forward/backward, and re-saves the final checkpoint —
                # consider also breaking out of the epoch loop.
                break
    logger.info("Training completed.")
if __name__ == "__main__":
main() | 15,256 | 34.399072 | 155 | py |
MCSE | MCSE-master/src/train_mix.py | import argparse
import logging
import math
import os
import datasets
from torch.utils.data.dataloader import DataLoader
import torch
from tqdm.auto import tqdm
import transformers
from accelerate import Accelerator
from transformers import (
AdamW,
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
PretrainedConfig,
SchedulerType,
default_data_collator,
get_scheduler,
set_seed,
)
from data import ImgSentDataset
from model import MCSE, BertForCL, RobertaForCL, ResNetVisnModel
from utils import evaluate, inf_train_gen
logger = logging.getLogger(__name__)
def parse_args():
    """Parse command-line hyper-parameters for mixed (wiki + caption) training.

    Returns:
        argparse.Namespace: all training hyper-parameters.

    Side effect: creates ``args.output_dir`` if it does not exist yet, so
    later checkpoint saves cannot fail on a missing directory.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--framework", type=str, default="simcse",
        choices=["simcse", "mcse"],
        help="The framework to use.")
    parser.add_argument(
        "--model_name_or_path", type=str, default="bert-base-uncased",
        help="Path to pretrained model or model identifier from huggingface.co/models.")
    parser.add_argument(
        "--caption_file", type=str, default=None,
        help="A .txt file containing the caption sentences.")
    parser.add_argument(
        "--feature_file", type=str, default=None,
        help="A .hdf5 file containing the image features (e.g. ResNet50 features).")
    parser.add_argument(
        "--text_file", type=str, default=None,
        help="A .txt file of unlabelled wiki sentences.")
    parser.add_argument(
        "--shuffle_imgs", action="store_true",
        help="Ablation study for random imgs")
    parser.add_argument(
        "--random_imgs", action="store_true",
        help="Ablation study for random imgs")
    parser.add_argument(
        "--output_dir", type=str, default="result/",
        help="Where to store the final model.")
    parser.add_argument(
        "--max_seq_length", type=int, default=32,
        help=(
            "The maximum total input sequence length after tokenization. Sequences longer than this will be truncated,"
            # fixed typo in the original help text: `--pad_to_max_lengh`
            " sequences shorter will be padded if `--pad_to_max_length` is passed."
        ),
    )
    parser.add_argument(
        "--pad_to_max_length", action="store_true",
        help="If passed, pad all samples to `max_length`. Otherwise, dynamic padding is used.")
    parser.add_argument(
        "--per_device_train_batch_size", type=int, default=64,
        help="Batch size (per device) for the training dataloader.")
    parser.add_argument(
        "--per_device_eval_batch_size", type=int, default=64,
        help="Batch size (per device) for the evaluation dataloader.")
    parser.add_argument(
        "--learning_rate", type=float, default=3e-5,
        help="Initial learning rate (after the potential warmup period) to use.")
    parser.add_argument(
        "--weight_decay", type=float, default=0.0,
        help="Weight decay to use.")
    parser.add_argument(
        "--num_train_epochs", type=int, default=6,
        help="Total number of training epochs to perform.")
    parser.add_argument(
        "--max_train_steps", type=int, default=None,
        help="Total number of training steps to perform. If provided, overrides num_train_epochs.")
    parser.add_argument(
        "--max_grad_norm", type=float, default=1.0,
        help="Maximum gradient norm for gradient clipping.")
    # SchedulerType is a str-based enum, so the converted value still compares
    # equal to the string choices below.
    parser.add_argument(
        "--lr_scheduler_type", type=SchedulerType, default="linear",
        choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"],
        help="The scheduler type to use.")
    parser.add_argument(
        "--num_warmup_steps", type=int, default=0,
        help="Number of steps for the warmup in the lr scheduler.")
    parser.add_argument(
        "--seed", type=int, default=0,
        help="A seed for reproducible training. We used [0,1,2,3,4] in experiments.")
    parser.add_argument(
        "--temp", type=float, default=0.05,
        help="Temperature for softmax.")
    parser.add_argument(
        "--temp_vl", type=float, default=0.05,
        help="Temperature for cross-modality contrastive learning")
    parser.add_argument(
        "--hidden_size", type=int, default=768,
        # fixed typo in the original help text: "dimention"
        help="Text embedding dimension of pooled output (mlp)")
    parser.add_argument(
        "--proj_dim", type=int, default=256,
        help="Projection dimension in grounding space")
    parser.add_argument(
        "--lbd", type=float, default=0.05,
        help="weight for inter-modality loss")
    parser.add_argument(
        "--eval_steps", type=int, default=125,
        help="evaluation step interval")
    parser.add_argument(
        "--metric_for_best_model", type=str, default='stsb_spearman',
        help="for saving best checkpoint")
    parser.add_argument(
        "--gradient_accumulation_steps", type=int, default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.")
    parser.add_argument(
        "--shot", type=int, default=-1,
        help="few-shot setting")
    args = parser.parse_args()

    # Create the output directory up front so later saves cannot fail on it.
    if args.output_dir is not None:
        os.makedirs(args.output_dir, exist_ok=True)
    return args
def main():
    """Train SimCSE/MCSE on a mix of text-only (wiki) and image-caption data.

    Two dataloaders are wrapped in infinite generators; every
    ``paired_sample_step``-th step draws an image-caption batch (which may
    add the inter-modality loss under ``--framework mcse``), all other steps
    draw a text-only batch. Evaluation/checkpointing mirrors train.py.
    """
    args = parse_args()
    print(args)
    # Initialize the accelerator.
    accelerator = Accelerator()
    args.device = accelerator.device
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(accelerator.state)
    # Setup logging, we only want one process per machine to log things on the screen.
    # accelerator.is_local_main_process is only True for one process per machine.
    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # If passed along, set the training seed now.
    set_seed(args.seed)
    # Load pretrained tokenizer
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, use_fast=True)
    # Persist tokenizer + run configuration next to the checkpoints.
    tokenizer.save_pretrained(args.output_dir)
    torch.save(args, os.path.join(args.output_dir, "training_args.bin"))
    config = AutoConfig.from_pretrained(args.model_name_or_path)
    # Pick backbone by substring match; 'roberta' must be tested before 'bert'.
    if 'roberta' in args.model_name_or_path:
        lang_model = RobertaForCL.from_pretrained(
            args.model_name_or_path,
            from_tf=bool(".ckpt" in args.model_name_or_path),
            config=config,
            model_args=args
        )
    elif 'bert' in args.model_name_or_path:
        lang_model = BertForCL.from_pretrained(
            args.model_name_or_path,
            from_tf=bool(".ckpt" in args.model_name_or_path),
            config=config,
            model_args=args
        )
    else:
        raise NotImplementedError
    if args.framework.lower() == 'mcse':
        # 2048 matches the ResNet50 feature dimension mentioned in the
        # --feature_file help text.
        visn_model = ResNetVisnModel(2048, args.proj_dim)
    else:
        visn_model = None
    model = MCSE(lang_model, visn_model, args)
    # Define collator function
    def data_collator(batch):
        # batch: list of dicts with key 'sent' and (for paired data) 'img'.
        keys = batch[0].keys()
        sentences = [b['sent'] for b in batch]
        new_batch = {}
        total = len(sentences)
        # tokenization
        tokenized_sents = tokenizer(
            sentences,
            max_length=args.max_seq_length,
            truncation=True,
            padding="max_length" if args.pad_to_max_length else 'longest',
            return_tensors='pt'
        )
        if 'img' in keys:
            new_batch['img'] = torch.stack([batch[i]['img'] for i in range(total)])
        for key in ['input_ids', 'attention_mask', 'token_type_ids', 'position_ids']:
            # (bs, len) -> (bs, 2, len)
            # Duplicate each sentence so the model sees two dropout views of
            # the same input (SimCSE-style positive pair).
            if key in tokenized_sents.keys():
                new_batch[key] = tokenized_sents[key].unsqueeze(1).repeat(1, 2, 1)
        return new_batch
    # dataset and dataloader (it's better to implement it by Sampler)
    train_dataset_textonly = ImgSentDataset(text_file = args.text_file, feature_file = None)
    train_dataloader_textonly = DataLoader(train_dataset_textonly,
                                           shuffle=True,
                                           batch_size=args.per_device_train_batch_size,
                                           collate_fn=data_collator)
    textonly_loader = inf_train_gen(train_dataloader_textonly)
    train_dataset_pair = ImgSentDataset(text_file=args.caption_file, feature_file=args.feature_file)
    train_dataloader_pair = DataLoader(train_dataset_pair,
                                       shuffle=True,
                                       batch_size=args.per_device_train_batch_size,
                                       collate_fn=data_collator)
    pair_loader = inf_train_gen(train_dataloader_pair)
    # Optimizer
    # Split weights in two groups, one with weight decay and the other not. Same as examples in huggingface and sentence-transformer.
    no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
    optimizer_grouped_parameters = [
        {
            "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
            "weight_decay": args.weight_decay,
        },
        {
            "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
            "weight_decay": 0.0,
        },
    ]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate)
    # Prepare everything with our `accelerator`.
    # NOTE(review): unlike train.py, the dataloaders are NOT passed through
    # accelerator.prepare here; batches are moved to 'cuda' manually below.
    model, optimizer = accelerator.prepare(model, optimizer)
    # Scheduler and math around the number of training steps.
    num_update_steps_per_epoch = math.ceil((len(train_dataloader_textonly) + len(train_dataloader_pair)) / args.gradient_accumulation_steps)
    if args.max_train_steps is None:
        args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
    else:
        # Explicit step budget wins; recompute epochs to cover it.
        args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
    lr_scheduler = get_scheduler(
        name=args.lr_scheduler_type,
        optimizer=optimizer,
        num_warmup_steps=args.num_warmup_steps,
        num_training_steps=args.max_train_steps,
    )
    set_seed(args.seed)  # for sake of the status change of sampler
    # Train! num_processes -> 1
    total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
    logger.info("***** Running training *****")
    logger.info(f" Num text examples = {len(train_dataset_textonly)}")
    logger.info(f" Num paired examples = {len(train_dataset_pair)}")
    logger.info(f" Num Epochs = {args.num_train_epochs}")
    logger.info(f" Instantaneous batch size per device = {args.per_device_train_batch_size}")
    logger.info(f" Train train batch size (w.parallel, distributed & accumulation) = {total_batch_size}")
    logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
    logger.info(f" Total optimization steps = {args.max_train_steps}")
    # Only show the progress bar once on each machine.
    progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process)
    completed_steps = 0
    best_metric = 0
    # NOTE(review): this is 0 when the paired loader has MORE batches than the
    # text-only loader, and `step % paired_sample_step` below would then raise
    # ZeroDivisionError — presumably the wiki corpus is always the larger one;
    # confirm with the intended data setup.
    paired_sample_step = len(train_dataloader_textonly) // len(train_dataloader_pair)
    for epoch in range(args.num_train_epochs):
        for step in range(num_update_steps_per_epoch):
            model.train()
            # Interleave: every paired_sample_step-th step draws an
            # image-caption batch, the rest draw text-only batches.
            if step % paired_sample_step == 0:
                batch = next(pair_loader)
                for key in batch.keys():
                    # NOTE(review): device hard-coded to 'cuda' instead of
                    # args.device — single-GPU assumption; confirm.
                    batch[key] = batch[key].to('cuda')
            else:
                batch = next(textonly_loader)
                for key in batch.keys():
                    batch[key] = batch[key].to('cuda')
            if step % paired_sample_step == 0 and args.framework.lower() == 'mcse':
                # MCSE + paired batch: add the weighted inter-modality loss.
                intra_loss, inter_loss = model.compute_loss(batch, cal_inter=True)
                loss = intra_loss + args.lbd * inter_loss
            else:
                loss = model.compute_loss(batch, cal_inter=False)
            # Scale so gradients accumulated over several micro-batches average out.
            loss = loss / args.gradient_accumulation_steps
            accelerator.backward(loss)
            if (step+1) % args.gradient_accumulation_steps == 0 or step == num_update_steps_per_epoch-1:
                torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
                progress_bar.update(1)
                completed_steps += 1
            if (step+1) % args.gradient_accumulation_steps == 0 and (completed_steps % args.eval_steps == 0 or completed_steps >= args.max_train_steps):
                logger.info("***** Start evaluation *****")
                model.eval()
                metrics = evaluate(model.lang_model, tokenizer)
                logger.info(f" step {completed_steps}: eval_stsb_spearman = {metrics['eval_stsb_spearman']}")
                if metrics['eval_'+args.metric_for_best_model] > best_metric:
                    # evaluation
                    best_metric = metrics['eval_'+args.metric_for_best_model]
                    # save (1) pytorch_model.bin (2) config.json
                    logger.info("Saving best model checkpoint to %s", args.output_dir)
                    accelerator.wait_for_everyone() # wait for all processes to reach that point in the script
                    unwrapped_model = accelerator.unwrap_model(model)
                    unwrapped_model.lang_model.save_pretrained(args.output_dir, save_function=accelerator.save)
                    if args.framework.lower() == 'mcse':
                        # Vision tower + grounding head are saved separately;
                        # save_pretrained only covers the language model.
                        accelerator.save(
                            {
                                'visn_model': unwrapped_model.visn_model.state_dict(),
                                'grounding': unwrapped_model.grounding.state_dict()
                            },
                            os.path.join(args.output_dir, 'mcse.pt')
                        )
            if completed_steps >= args.max_train_steps:
                path = os.path.join(args.output_dir, 'final_checkpoint')
                logger.info("Saving final checkpoint to %s", path)
                tokenizer.save_pretrained(path)
                accelerator.wait_for_everyone() # wait for all processes to reach that point in the script
                unwrapped_model = accelerator.unwrap_model(model)
                unwrapped_model.lang_model.save_pretrained(path, save_function=accelerator.save)
                if args.framework.lower() == 'mcse':
                    accelerator.save(
                        {
                            'visn_model': unwrapped_model.visn_model.state_dict(),
                            'grounding': unwrapped_model.grounding.state_dict()
                        },
                        os.path.join(path, 'mcse.pt')
                    )
                # NOTE(review): only exits the inner loop; remaining epochs
                # re-enter, train one more step and re-save the final checkpoint.
                break
    logger.info("Training completed.")
if __name__ == "__main__":
main() | 16,025 | 34.852349 | 155 | py |
MCSE | MCSE-master/preprocess/prepare_coco.py | __author__ = 'tylin'
__version__ = '2.0'
# Interface for accessing the Microsoft COCO dataset.
# Microsoft COCO is a large image dataset designed for object detection,
# segmentation, and caption generation. pycocotools is a Python API that
# assists in loading, parsing and visualizing the annotations in COCO.
# Please visit http://mscoco.org/ for more information on COCO, including
# for the data, paper, and tutorials. The exact format of the annotations
# is also described on the COCO website. For example usage of the pycocotools
# please see pycocotools_demo.ipynb. In addition to this API, please download both
# the COCO images and annotations in order to run the demo.
# An alternative to using the API is to load the annotations directly
# into Python dictionary
# Using the API provides additional utility functions. Note that this API
# supports both *instance* and *caption* annotations. In the case of
# captions not all functions are defined (e.g. categories are undefined).
# The following API functions are defined:
# COCO - COCO api class that loads COCO annotation file and prepare data structures.
# decodeMask - Decode binary mask M encoded via run-length encoding.
# encodeMask - Encode binary mask M using run-length encoding.
# getAnnIds - Get ann ids that satisfy given filter conditions.
# getCatIds - Get cat ids that satisfy given filter conditions.
# getImgIds - Get img ids that satisfy given filter conditions.
# loadAnns - Load anns with the specified ids.
# loadCats - Load cats with the specified ids.
# loadImgs - Load imgs with the specified ids.
# annToMask - Convert segmentation in an annotation to binary mask.
# showAnns - Display the specified annotations.
# loadRes - Load algorithm results and create API for accessing them.
# download - Download COCO images from mscoco.org server.
# Throughout the API "ann"=annotation, "cat"=category, and "img"=image.
# Help on each functions can be accessed by: "help COCO>function".
# See also COCO>decodeMask,
# COCO>encodeMask, COCO>getAnnIds, COCO>getCatIds,
# COCO>getImgIds, COCO>loadAnns, COCO>loadCats,
# COCO>loadImgs, COCO>annToMask, COCO>showAnns
# Microsoft COCO Toolbox. version 2.0
# Data, paper, and tutorials available at: http://mscoco.org/
# Code written by Piotr Dollar and Tsung-Yi Lin, 2014.
# Licensed under the Simplified BSD License [see bsd.txt]
import json
import time
import matplotlib.pyplot as plt
from matplotlib.collections import PatchCollection
from matplotlib.patches import Polygon
import numpy as np
import copy
import itertools
import os
from collections import defaultdict
import sys
PYTHON_VERSION = sys.version_info[0]
if PYTHON_VERSION == 2:
from urllib import urlretrieve
elif PYTHON_VERSION == 3:
from urllib.request import urlretrieve
import os.path as osp
import random
import argparse
from extract_visn_feature import ResnetFeatureExtractor
def _isArrayLike(obj):
return hasattr(obj, '__iter__') and hasattr(obj, '__len__')
class COCO:
def __init__(self, annotation_file=None):
"""
Constructor of Microsoft COCO helper class for reading and visualizing annotations.
:param annotation_file (str): location of annotation file
:param image_folder (str): location to the folder that hosts images.
:return:
"""
# load dataset
self.dataset,self.anns,self.cats,self.imgs = dict(),dict(),dict(),dict()
self.imgToAnns, self.catToImgs = defaultdict(list), defaultdict(list)
if not annotation_file == None:
print('loading annotations into memory...')
tic = time.time()
dataset = json.load(open(annotation_file, 'r'))
assert type(dataset)==dict, 'annotation file format {} not supported'.format(type(dataset))
print('Done (t={:0.2f}s)'.format(time.time()- tic))
self.dataset = dataset
self.createIndex()
def createIndex(self):
# create index
print('creating index...')
anns, cats, imgs = {}, {}, {}
imgToAnns,catToImgs = defaultdict(list),defaultdict(list)
if 'annotations' in self.dataset:
for ann in self.dataset['annotations']:
imgToAnns[ann['image_id']].append(ann)
anns[ann['id']] = ann
if 'images' in self.dataset:
for img in self.dataset['images']:
imgs[img['id']] = img
if 'categories' in self.dataset:
for cat in self.dataset['categories']:
cats[cat['id']] = cat
if 'annotations' in self.dataset and 'categories' in self.dataset:
for ann in self.dataset['annotations']:
catToImgs[ann['category_id']].append(ann['image_id'])
print('index created!')
# create class members
self.anns = anns
self.imgToAnns = imgToAnns
self.catToImgs = catToImgs
self.imgs = imgs
self.cats = cats
def info(self):
"""
Print information about the annotation file.
:return:
"""
for key, value in self.dataset['info'].items():
print('{}: {}'.format(key, value))
def getAnnIds(self, imgIds=[], catIds=[], areaRng=[], iscrowd=None):
"""
Get ann ids that satisfy given filter conditions. default skips that filter
:param imgIds (int array) : get anns for given imgs
catIds (int array) : get anns for given cats
areaRng (float array) : get anns for given area range (e.g. [0 inf])
iscrowd (boolean) : get anns for given crowd label (False or True)
:return: ids (int array) : integer array of ann ids
"""
imgIds = imgIds if _isArrayLike(imgIds) else [imgIds]
catIds = catIds if _isArrayLike(catIds) else [catIds]
if len(imgIds) == len(catIds) == len(areaRng) == 0:
anns = self.dataset['annotations']
else:
if not len(imgIds) == 0:
lists = [self.imgToAnns[imgId] for imgId in imgIds if imgId in self.imgToAnns]
anns = list(itertools.chain.from_iterable(lists))
else:
anns = self.dataset['annotations']
anns = anns if len(catIds) == 0 else [ann for ann in anns if ann['category_id'] in catIds]
anns = anns if len(areaRng) == 0 else [ann for ann in anns if ann['area'] > areaRng[0] and ann['area'] < areaRng[1]]
if not iscrowd == None:
ids = [ann['id'] for ann in anns if ann['iscrowd'] == iscrowd]
else:
ids = [ann['id'] for ann in anns]
return ids
def getCatIds(self, catNms=[], supNms=[], catIds=[]):
"""
filtering parameters. default skips that filter.
:param catNms (str array) : get cats for given cat names
:param supNms (str array) : get cats for given supercategory names
:param catIds (int array) : get cats for given cat ids
:return: ids (int array) : integer array of cat ids
"""
catNms = catNms if _isArrayLike(catNms) else [catNms]
supNms = supNms if _isArrayLike(supNms) else [supNms]
catIds = catIds if _isArrayLike(catIds) else [catIds]
if len(catNms) == len(supNms) == len(catIds) == 0:
cats = self.dataset['categories']
else:
cats = self.dataset['categories']
cats = cats if len(catNms) == 0 else [cat for cat in cats if cat['name'] in catNms]
cats = cats if len(supNms) == 0 else [cat for cat in cats if cat['supercategory'] in supNms]
cats = cats if len(catIds) == 0 else [cat for cat in cats if cat['id'] in catIds]
ids = [cat['id'] for cat in cats]
return ids
def getImgIds(self, imgIds=[], catIds=[]):
'''
Get img ids that satisfy given filter conditions.
:param imgIds (int array) : get imgs for given ids
:param catIds (int array) : get imgs with all given cats
:return: ids (int array) : integer array of img ids
'''
imgIds = imgIds if _isArrayLike(imgIds) else [imgIds]
catIds = catIds if _isArrayLike(catIds) else [catIds]
if len(imgIds) == len(catIds) == 0:
ids = self.imgs.keys()
else:
ids = set(imgIds)
for i, catId in enumerate(catIds):
if i == 0 and len(ids) == 0:
ids = set(self.catToImgs[catId])
else:
ids &= set(self.catToImgs[catId])
return list(ids)
def loadAnns(self, ids=[]):
"""
Load anns with the specified ids.
:param ids (int array) : integer ids specifying anns
:return: anns (object array) : loaded ann objects
"""
if _isArrayLike(ids):
return [self.anns[id] for id in ids]
elif type(ids) == int:
return [self.anns[ids]]
def loadCats(self, ids=[]):
"""
Load cats with the specified ids.
:param ids (int array) : integer ids specifying cats
:return: cats (object array) : loaded cat objects
"""
if _isArrayLike(ids):
return [self.cats[id] for id in ids]
elif type(ids) == int:
return [self.cats[ids]]
def loadImgs(self, ids=[]):
"""
Load anns with the specified ids.
:param ids (int array) : integer ids specifying img
:return: imgs (object array) : loaded img objects
"""
if _isArrayLike(ids):
return [self.imgs[id] for id in ids]
elif type(ids) == int:
return [self.imgs[ids]]
    def showAnns(self, anns, draw_bbox=False):
        """
        Display the specified annotations on the current matplotlib axes.
        :param anns (array of object): annotations to display
        :param draw_bbox (bool): also draw each annotation's bounding box
        :return: None (returns 0 when anns is empty)
        """
        if len(anns) == 0:
            return 0
        # Decide the annotation flavour from the first record's keys.
        if 'segmentation' in anns[0] or 'keypoints' in anns[0]:
            datasetType = 'instances'
        elif 'caption' in anns[0]:
            datasetType = 'captions'
        else:
            raise Exception('datasetType not supported')
        if datasetType == 'instances':
            ax = plt.gca()
            ax.set_autoscale_on(False)
            polygons = []
            color = []
            for ann in anns:
                # Random bright-ish color per annotation.
                c = (np.random.random((1, 3))*0.6+0.4).tolist()[0]
                # The segmentation/mask drawing below was deliberately
                # disabled by turning it into a bare string literal; kept
                # verbatim.
                """
                if 'segmentation' in ann:
                    if type(ann['segmentation']) == list:
                        # polygon
                        for seg in ann['segmentation']:
                            poly = np.array(seg).reshape((int(len(seg)/2), 2))
                            polygons.append(Polygon(poly))
                            color.append(c)
                    else:
                        # mask
                        t = self.imgs[ann['image_id']]
                        if type(ann['segmentation']['counts']) == list:
                            rle = maskUtils.frPyObjects([ann['segmentation']], t['height'], t['width'])
                        else:
                            rle = [ann['segmentation']]
                        m = maskUtils.decode(rle)
                        img = np.ones( (m.shape[0], m.shape[1], 3) )
                        if ann['iscrowd'] == 1:
                            color_mask = np.array([2.0,166.0,101.0])/255
                        if ann['iscrowd'] == 0:
                            color_mask = np.random.random((1, 3)).tolist()[0]
                        for i in range(3):
                            img[:,:,i] = color_mask[i]
                        ax.imshow(np.dstack( (img, m*0.5) ))
                """
                if 'keypoints' in ann and type(ann['keypoints']) == list:
                    # turn skeleton into zero-based index
                    sks = np.array(self.loadCats(ann['category_id'])[0]['skeleton'])-1
                    # keypoints are stored flat as (x, y, visibility) triples
                    kp = np.array(ann['keypoints'])
                    x = kp[0::3]
                    y = kp[1::3]
                    v = kp[2::3]
                    for sk in sks:
                        # draw a limb only when both endpoints are visible
                        if np.all(v[sk]>0):
                            plt.plot(x[sk],y[sk], linewidth=3, color=c)
                    plt.plot(x[v>0], y[v>0],'o',markersize=8, markerfacecolor=c, markeredgecolor='k',markeredgewidth=2)
                    plt.plot(x[v>1], y[v>1],'o',markersize=8, markerfacecolor=c, markeredgecolor=c, markeredgewidth=2)
                if draw_bbox:
                    # bbox is [x, y, width, height]; convert to corner polygon
                    [bbox_x, bbox_y, bbox_w, bbox_h] = ann['bbox']
                    poly = [[bbox_x, bbox_y], [bbox_x, bbox_y+bbox_h], [bbox_x+bbox_w, bbox_y+bbox_h], [bbox_x+bbox_w, bbox_y]]
                    np_poly = np.array(poly).reshape((4,2))
                    polygons.append(Polygon(np_poly))
                    color.append(c)
            # Filled translucent patches plus solid outlines.
            p = PatchCollection(polygons, facecolor=color, linewidths=0, alpha=0.4)
            ax.add_collection(p)
            p = PatchCollection(polygons, facecolor='none', edgecolors=color, linewidths=2)
            ax.add_collection(p)
        elif datasetType == 'captions':
            for ann in anns:
                print(ann['caption'])
    def loadRes(self, resFile):
        """
        Load result file and return a result api object.
        :param resFile (str) : file name of result file (also accepts an
                               ndarray of results or an already-loaded list)
        :return: res (obj) : result api object
        """
        res = COCO()
        res.dataset['images'] = [img for img in self.dataset['images']]
        print('Loading and preparing results...')
        tic = time.time()
        # Accept a json file path, a numpy result array, or a python list.
        if type(resFile) == str or (PYTHON_VERSION == 2 and type(resFile) == unicode):
            anns = json.load(open(resFile))
        elif type(resFile) == np.ndarray:
            anns = self.loadNumpyAnnotations(resFile)
        else:
            anns = resFile
        assert type(anns) == list, 'results in not an array of objects'
        annsImgIds = [ann['image_id'] for ann in anns]
        # Every result must refer to an image known to this COCO instance.
        assert set(annsImgIds) == (set(annsImgIds) & set(self.getImgIds())), \
               'Results do not correspond to current coco set'
        if 'caption' in anns[0]:
            # Caption results: keep only images that actually have results.
            imgIds = set([img['id'] for img in res.dataset['images']]) & set([ann['image_id'] for ann in anns])
            res.dataset['images'] = [img for img in res.dataset['images'] if img['id'] in imgIds]
            for id, ann in enumerate(anns):
                ann['id'] = id+1
        elif 'bbox' in anns[0] and not anns[0]['bbox'] == []:
            # Detection results: synthesize segmentation/area from the bbox.
            res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
            for id, ann in enumerate(anns):
                bb = ann['bbox']
                x1, x2, y1, y2 = [bb[0], bb[0]+bb[2], bb[1], bb[1]+bb[3]]
                if not 'segmentation' in ann:
                    ann['segmentation'] = [[x1, y1, x1, y2, x2, y2, x2, y1]]
                ann['area'] = bb[2]*bb[3]
                ann['id'] = id+1
                ann['iscrowd'] = 0
        #elif 'segmentation' in anns[0]:
        #    res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
        #    for id, ann in enumerate(anns):
        #        # now only support compressed RLE format as segmentation results
        #        ann['area'] = maskUtils.area(ann['segmentation'])
        #        if not 'bbox' in ann:
        #            ann['bbox'] = maskUtils.toBbox(ann['segmentation'])
        #        ann['id'] = id+1
        #        ann['iscrowd'] = 0
        elif 'keypoints' in anns[0]:
            # Keypoint results: derive area/bbox from keypoint extents.
            res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
            for id, ann in enumerate(anns):
                s = ann['keypoints']
                x = s[0::3]
                y = s[1::3]
                x0,x1,y0,y1 = np.min(x), np.max(x), np.min(y), np.max(y)
                ann['area'] = (x1-x0)*(y1-y0)
                ann['id'] = id + 1
                ann['bbox'] = [x0,y0,x1-x0,y1-y0]
        print('DONE (t={:0.2f}s)'.format(time.time()- tic))
        res.dataset['annotations'] = anns
        res.createIndex()
        return res
def download(self, tarDir = None, imgIds = [] ):
'''
Download COCO images from mscoco.org server.
:param tarDir (str): COCO results directory name
imgIds (list): images to be downloaded
:return:
'''
if tarDir is None:
print('Please specify target directory')
return -1
if len(imgIds) == 0:
imgs = self.imgs.values()
else:
imgs = self.loadImgs(imgIds)
N = len(imgs)
if not os.path.exists(tarDir):
os.makedirs(tarDir)
for i, img in enumerate(imgs):
tic = time.time()
fname = os.path.join(tarDir, img['file_name'])
if not os.path.exists(fname):
urlretrieve(img['coco_url'], fname)
print('downloaded {}/{} images (t={:0.1f}s)'.format(i, N, time.time()- tic))
def loadNumpyAnnotations(self, data):
"""
Convert result data from a numpy array [Nx7] where each row contains {imageID,x1,y1,w,h,score,class}
:param data (numpy.ndarray)
:return: annotations (python nested list)
"""
print('Converting ndarray to lists...')
assert(type(data) == np.ndarray)
print(data.shape)
assert(data.shape[1] == 7)
N = data.shape[0]
ann = []
for i in range(N):
if i % 1000000 == 0:
print('{}/{}'.format(i,N))
ann += [{
'image_id' : int(data[i, 0]),
'bbox' : [ data[i, 1], data[i, 2], data[i, 3], data[i, 4] ],
'score' : data[i, 5],
'category_id': int(data[i, 6]),
}]
return ann
def get_coco_caps(ann_dir, out_dir):
    """Sample one random caption per COCO train2014 image and save them.

    :param ann_dir: directory containing the COCO annotation json files
    :param out_dir: directory where 'coco_random_captions.txt' is written
    :return: the image ids (dict keys view) that were processed
    """
    ann_file = osp.join(ann_dir, 'captions_train2014.json')
    img_ids = COCO(osp.join(ann_dir, 'instances_train2014.json')).imgs.keys()
    coco_caps = COCO(ann_file)
    captions = []
    for img_id in img_ids:
        ann_ids = coco_caps.getAnnIds(imgIds=[img_id])[:5]  # maximum: 6
        anns = coco_caps.loadAnns(ann_ids)
        ann_caps = [ann['caption'] for ann in anns]
        # BUG FIX: the original called allcaptions.append(ann_caps) here,
        # but `allcaptions` was never defined (NameError on the first
        # iteration) and its contents were never used, so it is dropped.
        captions.extend(random.sample(ann_caps, 1))
    curr_paths_fname = osp.join(out_dir, 'coco_random_captions.txt')
    print("\tSave captions to ", curr_paths_fname)
    with open(curr_paths_fname, 'w') as f:
        for cap in captions:
            # Collapse internal newlines so each caption occupies one line.
            cap = ' '.join(cap.strip().split('\n'))
            f.write(cap + "\n")
    return img_ids
if __name__=="__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--ann_dir', type=str, default='mscoco/annotations')
    parser.add_argument('--image_dir', type=str, default='mscoco/images/train2014')
    parser.add_argument('--output_dir', type=str, default='data/')
    parser.add_argument('--batch_size', type=int, default=32)
    args = parser.parse_args()
    # get random captions
    img_ids = get_coco_caps(args.ann_dir, args.output_dir)
    # extract image features
    # BUG FIX: the parser defines --image_dir, so the parsed attribute is
    # args.image_dir; the original accessed the non-existent args.images_dir
    # and crashed with AttributeError.
    extractor = ResnetFeatureExtractor(args.image_dir, args.output_dir, args.batch_size)
    extractor.extract_vision_features(dataname='coco',
                                      img_ids=img_ids)
| 19,410 | 41.197826 | 128 | py |
MCSE | MCSE-master/preprocess/extract_visn_feature.py | import os.path as osp
import h5py
import tqdm
import torch
import torch.nn as nn
import torchvision.transforms as transforms
import torchvision.models as models
from torchvision.datasets.folder import default_loader
def get_visn_arch(arch):
    """Look up a torchvision model constructor by name.

    :param arch: architecture name, e.g. 'resnet50'
    :return: the torchvision.models factory function
    :raises ValueError: if torchvision has no such architecture. The
        original printed the error and implicitly returned None, which made
        the caller fail later with an opaque ``'NoneType' is not callable``.
    """
    try:
        return getattr(models, arch)
    except AttributeError as e:
        print(e)
        print("There is no arch %s in torchvision." % arch)
        raise ValueError("There is no arch %s in torchvision." % arch) from e
class ResNet(nn.Module):
    """Frozen torchvision backbone that maps image batches to pooled
    feature vectors (classification head replaced by Identity)."""
    def __init__(self, arch='resnet50', pretrained=True):
        """
        :param arch: backbone architecture name resolved via get_visn_arch
        :param pretrained: load pre-trained weights
        """
        super().__init__()
        backbone = get_visn_arch(arch)(pretrained=pretrained)
        # The extractor is inference-only: freeze every weight.
        for param in backbone.parameters():
            param.requires_grad = False
        # Drop the final classifier so forward() yields pooled features.
        backbone.fc = nn.Identity()
        self.backbone = backbone
    def forward(self, img):
        """
        :param img: image batch tensor
        :return: detached feature tensor of shape [batch_size, d]
        """
        return self.backbone(img).detach()
class ResnetFeatureExtractor():
    """Runs a frozen ResNet-50 over a list of images and stores the pooled
    features in '<output_dir>/<dataname>.hdf5' under dataset 'features'."""
    def __init__(self, image_dir, output_dir, batch_size):
        """
        :param image_dir: directory containing the .jpg images
        :param output_dir: directory the hdf5 feature file is written to
        :param batch_size: number of images per forward pass
        """
        self.model= ResNet(arch='resnet50').eval().cuda()
        # Standard ImageNet preprocessing: resize, center crop, normalize.
        self.image_transform = transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])
        self.image_dir = image_dir
        self.output_dir = output_dir
        self.batch_size = batch_size
    def extract_vision_features(self, dataname, img_ids):
        """Encode every image in img_ids and write the features to hdf5.

        :param dataname: dataset name ('coco' uses COCO file naming)
        :param img_ids: iterable of image identifiers
        """
        print('Start extracting resnet features...')
        if dataname=='coco':
            # NOTE(review): file names use the val2014 prefix although the
            # ids come from train2014 annotations — confirm against the data.
            img_paths = [osp.join(self.image_dir, 'COCO_val2014_'+str(id).zfill(12)+'.jpg') for id in img_ids ]
        else:
            img_paths = [ osp.join(self.image_dir, id+'.jpg') for id in img_ids]
        tensor_imgs = []
        img_feats = []
        last_dim = -1
        for i, img_path in enumerate(tqdm.tqdm(img_paths)):
            pil_img = default_loader(img_path)
            tensor_imgs.append(self.image_transform(pil_img))
            if len(tensor_imgs) == self.batch_size:
                visn_input = torch.stack(tensor_imgs).cuda() #torch.Size([32, 3, 224, 224])
                with torch.no_grad():
                    visn_output = self.model(visn_input) # torch.Size([32, 2048])
                if last_dim == -1:
                    last_dim = visn_output.shape[-1] # 2048
                img_feats.extend(visn_output.detach().cpu().numpy())
                tensor_imgs = []
        if len(tensor_imgs) > 0:
            # Flush the final, possibly smaller batch.
            visn_input = torch.stack(tensor_imgs).cuda()
            with torch.no_grad():
                visn_output = self.model(visn_input)
            if last_dim == -1:
                # BUG FIX: with fewer than batch_size images the loop above
                # never processed a full batch, leaving last_dim at -1 and
                # making create_dataset below fail; record the dim here too.
                last_dim = visn_output.shape[-1]
            img_feats.extend(visn_output.detach().cpu().numpy())
        assert len(img_feats) == len(img_paths)
        # Save features
        h5_path = osp.join(self.output_dir, '%s.hdf5'%dataname)
        print(f"\tSave features to {h5_path} with hdf5 dataset 'features'.")
        h5_file = h5py.File(h5_path, 'w')
        dset = h5_file.create_dataset("features", (len(img_paths), last_dim))
        for i, img_feat in enumerate(img_feats):
            dset[i] = img_feat
        h5_file.close()
| 3,518 | 32.198113 | 112 | py |
MCSE | MCSE-master/preprocess/prepare_flickr.py | import xml.etree.ElementTree as ET
import argparse
import os.path as osp
import tqdm
import random
from extract_visn_feature import ResnetFeatureExtractor
def get_sentence_data(fn):
    """
    Parses a sentence file from the Flickr30K Entities dataset

    input:
      fn - full file path to the sentence file to parse

    output:
      a list of dictionaries for each sentence with the following fields:
          sentence - the original sentence
          phrases - a list of dictionaries for each phrase with the
                    following fields:
                      phrase - the text of the annotated phrase
                      first_word_index - the position of the first word of
                                         the phrase in the sentence
                      phrase_id - an identifier for this phrase
                      phrase_type - a list of the coarse categories this
                                    phrase belongs to
    """
    with open(fn, 'r') as f:
        sentences = f.read().split('\n')
    annotations = []
    for sentence in sentences:
        if not sentence:
            continue
        # Parallel lists describing each annotated phrase of the sentence.
        first_word = []
        phrases = []
        phrase_id = []
        phrase_type = []
        words = []
        current_phrase = []
        add_to_phrase = False
        for token in sentence.split():
            if add_to_phrase:
                if token[-1] == ']':
                    # ']' closes the currently open annotated phrase.
                    add_to_phrase = False
                    token = token[:-1]
                    current_phrase.append(token)
                    phrases.append(' '.join(current_phrase))
                    current_phrase = []
                else:
                    current_phrase.append(token)
                words.append(token)
            else:
                if token[0] == '[':
                    # Tokens shaped like '[/EN#<id>/<type>...' open a phrase.
                    add_to_phrase = True
                    first_word.append(len(words))
                    parts = token.split('/')
                    # strip the 'EN#' prefix from the phrase identifier
                    phrase_id.append(parts[1][3:])
                    phrase_type.append(parts[2:])
                else:
                    words.append(token)
        sentence_data = {'sentence': ' '.join(words), 'phrases': []}
        for index, phrase, p_id, p_type in zip(first_word, phrases, phrase_id, phrase_type):
            sentence_data['phrases'].append({'first_word_index': index,
                                             'phrase': phrase,
                                             'phrase_id': p_id,
                                             'phrase_type': p_type})
        annotations.append(sentence_data)
    return annotations
def get_annotations(fn):
    """
    Parses the xml files in the Flickr30K Entities dataset

    input:
      fn - full file path to the annotations file to parse

    output:
      dictionary with the following fields:
          scene - identifiers annotated as pertaining to the whole scene
          nobox - identifiers annotated as not being visible in the image
          boxes - maps each identifier to its list of boxes in the
                  [xmin ymin xmax ymax] format
      plus the image size tags (width/height/depth) copied as ints
    """
    root = ET.parse(fn).getroot()
    anno_info = {'boxes': {}, 'scene': [], 'nobox': []}
    # Copy the <size> children (width/height/depth) verbatim as ints.
    for size_element in root.findall('size')[0]:
        anno_info[size_element.tag] = int(size_element.text)
    for obj in root.findall('object'):
        # An object node can carry several <name> identifiers.
        for name_node in obj.findall('name'):
            box_id = name_node.text
            box_nodes = obj.findall('bndbox')
            if len(box_nodes) > 0:
                bnd = box_nodes[0]
                # Convert 1-based pixel coordinates to 0-based.
                coords = [int(bnd.findall(tag)[0].text) - 1
                          for tag in ('xmin', 'ymin', 'xmax', 'ymax')]
                anno_info['boxes'].setdefault(box_id, []).append(coords)
            else:
                if int(obj.findall('nobndbox')[0].text) > 0:
                    anno_info['nobox'].append(box_id)
                if int(obj.findall('scene')[0].text) > 0:
                    anno_info['scene'].append(box_id)
    return anno_info
def get_flickr_caps(split, flickr_entity_dir, output_dir):
    # test/val: 1000*5 captions train: 29783*5=148915 captions
    """Sample one random caption per Flickr30k image of a split and save them.

    :param split: 'train', 'val' or 'test' (picks '<split>.txt' id list)
    :param flickr_entity_dir: root of the Flickr30k Entities annotations
    :param output_dir: directory where 'flickr_random_captions.txt' is written
    :return: the list of image ids that were processed
    """
    sentence_dir = osp.join(flickr_entity_dir, 'Sentences')
    split_file = osp.join(flickr_entity_dir, '%s.txt'%split)
    print('Start sampling captions ...')
    with open(split_file, 'r') as f:
        img_ids = [line.strip() for line in f.readlines()]
    sent_paths = [osp.join(sentence_dir, img_id + '.txt') for img_id in img_ids]
    captions = []
    for sent_path in tqdm.tqdm(sent_paths):
        anns = get_sentence_data(sent_path)
        all_sents = [a['sentence'] for a in anns]
        # Keep exactly one randomly chosen caption per image.
        captions.extend(random.sample(all_sents, 1))
    # Save captions
    curr_paths_fname = osp.join(output_dir, 'flickr_random_captions.txt')
    print("Save captions to ", curr_paths_fname)
    with open(curr_paths_fname, 'w') as f:
        f.write("\n".join(captions))
    return img_ids
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--flickr_entities_dir', type=str, default='the_flickr30k_entities_path')
    parser.add_argument('--flickr_images_dir', type=str, default='the_flickr30k_images_path')
    parser.add_argument('--output_dir', type=str, default='data/')
    parser.add_argument('--batch_size', type=int, default=32)
    args = parser.parse_args()
    # get random captions (one per train-split image)
    img_ids = get_flickr_caps(split='train',
                              flickr_entity_dir=args.flickr_entities_dir,
                              output_dir=args.output_dir)
    # extract image features for exactly the sampled image ids
    extractor = ResnetFeatureExtractor(args.flickr_images_dir, args.output_dir, args.batch_size)
    extractor.extract_vision_features(dataname='flickr',
                                      img_ids=img_ids)
| 6,353 | 36.157895 | 97 | py |
MCSE | MCSE-master/SentEval/setup.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import io
from setuptools import setup, find_packages
# Use the repository README as the package's long description.
with io.open('./README.md', encoding='utf-8') as f:
    readme = f.read()
setup(
    name='SentEval',
    version='0.1.0',
    url='https://github.com/facebookresearch/SentEval',
    packages=find_packages(exclude=['scripts']),
    license='Attribution-NonCommercial 4.0 International',
    long_description=readme,
)
| 567 | 24.818182 | 61 | py |
MCSE | MCSE-master/SentEval/__init__.py | 0 | 0 | 0 | py | |
MCSE | MCSE-master/SentEval/examples/infersent.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
"""
InferSent models. See https://github.com/facebookresearch/InferSent.
"""
from __future__ import absolute_import, division, unicode_literals
import sys
import os
import torch
import logging
# get model.py from InferSent repo
from models import InferSent
# Set PATHs (fill these in before running the example)
PATH_SENTEVAL = '../'
PATH_TO_DATA = '../data'
PATH_TO_W2V = 'PATH/TO/glove.840B.300d.txt'  # or crawl-300d-2M.vec for V2
MODEL_PATH = 'infersent1.pkl'
V = 1 # version of InferSent
# Fail fast when the model / embedding paths are not configured.
assert os.path.isfile(MODEL_PATH) and os.path.isfile(PATH_TO_W2V), \
    'Set MODEL and GloVe PATHs'
# import senteval
sys.path.insert(0, PATH_SENTEVAL)
import senteval
def prepare(params, samples):
    """SentEval hook: build the InferSent vocabulary over every sample."""
    joined = [' '.join(tokens) for tokens in samples]
    params.infersent.build_vocab(joined, tokenize=False)
def batcher(params, batch):
    """SentEval hook: encode a batch of tokenized sentences with InferSent."""
    joined = [' '.join(tokens) for tokens in batch]
    return params.infersent.encode(joined, bsize=params.batch_size, tokenize=False)
"""
Evaluation of trained model on Transfer Tasks (SentEval)
"""
# define senteval params
params_senteval = {'task_path': PATH_TO_DATA, 'usepytorch': True, 'kfold': 5}
params_senteval['classifier'] = {'nhid': 0, 'optim': 'rmsprop', 'batch_size': 128,
'tenacity': 3, 'epoch_size': 2}
# Set up logger
logging.basicConfig(format='%(asctime)s : %(message)s', level=logging.DEBUG)
if __name__ == "__main__":
    # Load InferSent model
    params_model = {'bsize': 64, 'word_emb_dim': 300, 'enc_lstm_dim': 2048,
                    'pool_type': 'max', 'dpout_model': 0.0, 'version': V}
    model = InferSent(params_model)
    model.load_state_dict(torch.load(MODEL_PATH))
    model.set_w2v_path(PATH_TO_W2V)
    # batcher/prepare access the encoder through params_senteval.
    params_senteval['infersent'] = model.cuda()
    se = senteval.engine.SE(params_senteval, batcher, prepare)
    # Full SentEval suite: STS, classification, and probing tasks.
    transfer_tasks = ['STS12', 'STS13', 'STS14', 'STS15', 'STS16',
                      'MR', 'CR', 'MPQA', 'SUBJ', 'SST2', 'SST5', 'TREC', 'MRPC',
                      'SICKEntailment', 'SICKRelatedness', 'STSBenchmark',
                      'Length', 'WordContent', 'Depth', 'TopConstituents',
                      'BigramShift', 'Tense', 'SubjNumber', 'ObjNumber',
                      'OddManOut', 'CoordinationInversion']
    results = se.eval(transfer_tasks)
    print(results)
| 2,462 | 30.987013 | 92 | py |
MCSE | MCSE-master/SentEval/examples/bow.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from __future__ import absolute_import, division, unicode_literals
import sys
import io
import numpy as np
import logging
# Set PATHs (word vectors consumed by prepare()/get_wordvec below)
PATH_TO_SENTEVAL = '../'
PATH_TO_DATA = '../data'
# PATH_TO_VEC = 'glove/glove.840B.300d.txt'
PATH_TO_VEC = 'fasttext/crawl-300d-2M.vec'
# import SentEval
sys.path.insert(0, PATH_TO_SENTEVAL)
import senteval
# Create dictionary
def create_dictionary(sentences, threshold=0):
    """Build (id2word, word2id) from tokenized sentences.

    Words occurring fewer than `threshold` times are dropped (threshold=0
    keeps everything). The special tokens <s>, </s> and <p> are pinned to
    the lowest ids via huge pseudo-counts; the remaining vocabulary is
    ordered by descending frequency.
    """
    counts = {}
    for sentence in sentences:
        for token in sentence:
            counts[token] = counts.get(token, 0) + 1
    if threshold > 0:
        counts = {w: c for w, c in counts.items() if c >= threshold}
    counts['<s>'] = 1e9 + 4
    counts['</s>'] = 1e9 + 3
    counts['<p>'] = 1e9 + 2
    ordered = sorted(counts.items(), key=lambda kv: -kv[1])  # inverse sort
    id2word = []
    word2id = {}
    for idx, (word, _) in enumerate(ordered):
        id2word.append(word)
        word2id[word] = idx
    return id2word, word2id
# Get word vectors from vocabulary (glove, word2vec, fasttext ..)
def get_wordvec(path_to_vec, word2id):
    """Load pretrained vectors for exactly the words present in word2id.

    :param path_to_vec: text embedding file, one 'word v1 v2 ...' per line
    :param word2id: vocabulary mapping produced by create_dictionary
    :return: dict word -> numpy vector for every in-vocab word found
    """
    word_vec = {}
    with io.open(path_to_vec, 'r', encoding='utf-8') as f:
        # if word2vec or fasttext file : skip first line "next(f)"
        for line in f:
            word, vec = line.split(' ', 1)
            if word in word2id:
                word_vec[word] = np.fromstring(vec, sep=' ')
    # FIX: use lazy %-style logging arguments; the original eagerly
    # .format()-ed a line-continuation string literal whose continuation
    # indentation leaked into the logged message.
    logging.info('Found %d words with word vectors, out of %d words',
                 len(word_vec), len(word2id))
    return word_vec
# SentEval prepare and batcher
def prepare(params, samples):
    """SentEval hook: build the vocabulary over all samples and attach the
    matching pretrained word vectors to params."""
    id2word, word2id = create_dictionary(samples)
    params.word2id = word2id
    params.word_vec = get_wordvec(PATH_TO_VEC, word2id)
    params.wvec_dim = 300
def batcher(params, batch):
    """Embed each sentence as the mean of its known word vectors.

    Empty sentences are mapped to ['.']; sentences with no in-vocabulary
    words fall back to the zero vector.
    """
    cleaned = [sent if sent != [] else ['.'] for sent in batch]
    embeddings = []
    for sent in cleaned:
        vectors = [params.word_vec[w] for w in sent if w in params.word_vec]
        if not vectors:
            vectors = [np.zeros(params.wvec_dim)]
        embeddings.append(np.mean(vectors, 0))
    return np.vstack(embeddings)
# Set params for SentEval
params_senteval = {'task_path': PATH_TO_DATA, 'usepytorch': True, 'kfold': 5}
# settings of the logistic-regression probe trained on top of the embeddings
params_senteval['classifier'] = {'nhid': 0, 'optim': 'rmsprop', 'batch_size': 128,
                                 'tenacity': 3, 'epoch_size': 2}
# Set up logger
logging.basicConfig(format='%(asctime)s : %(message)s', level=logging.DEBUG)
if __name__ == "__main__":
    se = senteval.engine.SE(params_senteval, batcher, prepare)
    # Full SentEval suite: STS, classification, and probing tasks.
    transfer_tasks = ['STS12', 'STS13', 'STS14', 'STS15', 'STS16',
                      'MR', 'CR', 'MPQA', 'SUBJ', 'SST2', 'SST5', 'TREC', 'MRPC',
                      'SICKEntailment', 'SICKRelatedness', 'STSBenchmark',
                      'Length', 'WordContent', 'Depth', 'TopConstituents',
                      'BigramShift', 'Tense', 'SubjNumber', 'ObjNumber',
                      'OddManOut', 'CoordinationInversion']
    results = se.eval(transfer_tasks)
    print(results)
| 3,423 | 29.300885 | 82 | py |
MCSE | MCSE-master/SentEval/examples/googleuse.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from __future__ import absolute_import, division
import os
import sys
import logging
import tensorflow as tf
import tensorflow_hub as hub
tf.logging.set_verbosity(0)
# Set PATHs
PATH_TO_SENTEVAL = '../'
PATH_TO_DATA = '../data'
# import SentEval
sys.path.insert(0, PATH_TO_SENTEVAL)
import senteval
# tensorflow session
session = tf.Session()
# Silence TensorFlow's C++-level INFO/WARNING console output.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# SentEval prepare and batcher
def prepare(params, samples):
    """SentEval hook; the pretrained universal encoder needs no preparation."""
    return None
def batcher(params, batch):
    """Join token lists into strings and run them through the encoder.

    Empty sentences are replaced by '.' so the encoder never sees an
    empty string.
    """
    sentences = [' '.join(tokens) if tokens != [] else '.' for tokens in batch]
    return params['google_use'](sentences)
def make_embed_fn(module):
    """Return a closure that embeds a list of strings with a TF-Hub module.

    The graph and MonitoredSession are built once; the returned lambda
    captures them, so repeated calls reuse the same session.
    """
    with tf.Graph().as_default():
        sentences = tf.placeholder(tf.string)
        embed = hub.Module(module)
        embeddings = embed(sentences)
        session = tf.train.MonitoredSession()
        return lambda x: session.run(embeddings, {sentences: x})
# Start TF session and load Google Universal Sentence Encoder
encoder = make_embed_fn("https://tfhub.dev/google/universal-sentence-encoder-large/2")
# Set params for SentEval
params_senteval = {'task_path': PATH_TO_DATA, 'usepytorch': True, 'kfold': 5}
params_senteval['classifier'] = {'nhid': 0, 'optim': 'rmsprop', 'batch_size': 128,
                                 'tenacity': 3, 'epoch_size': 2}
# batcher() looks the encoder up through the params dict
params_senteval['google_use'] = encoder
# Set up logger
logging.basicConfig(format='%(asctime)s : %(message)s', level=logging.DEBUG)
if __name__ == "__main__":
    se = senteval.engine.SE(params_senteval, batcher, prepare)
    # Full SentEval suite: STS, classification, and probing tasks.
    transfer_tasks = ['STS12', 'STS13', 'STS14', 'STS15', 'STS16',
                      'MR', 'CR', 'MPQA', 'SUBJ', 'SST2', 'SST5', 'TREC', 'MRPC',
                      'SICKEntailment', 'SICKRelatedness', 'STSBenchmark',
                      'Length', 'WordContent', 'Depth', 'TopConstituents',
                      'BigramShift', 'Tense', 'SubjNumber', 'ObjNumber',
                      'OddManOut', 'CoordinationInversion']
    results = se.eval(transfer_tasks)
    print(results)
| 2,205 | 31.441176 | 86 | py |
MCSE | MCSE-master/SentEval/examples/models.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
"""
This file contains the definition of encoders used in https://arxiv.org/pdf/1705.02364.pdf
"""
import numpy as np
import time
import torch
import torch.nn as nn
class InferSent(nn.Module):
    """BiLSTM sentence encoder from https://arxiv.org/pdf/1705.02364.pdf.

    Encodes pre-tokenized sentences with a bidirectional LSTM over
    pretrained word vectors, pooled by max or mean into a fixed vector.
    """
    def __init__(self, config):
        # config keys: bsize, word_emb_dim, enc_lstm_dim, pool_type,
        # dpout_model, and optionally version (1 or 2).
        super(InferSent, self).__init__()
        self.bsize = config['bsize']
        self.word_emb_dim = config['word_emb_dim']
        self.enc_lstm_dim = config['enc_lstm_dim']
        self.pool_type = config['pool_type']
        self.dpout_model = config['dpout_model']
        self.version = 1 if 'version' not in config else config['version']
        self.enc_lstm = nn.LSTM(self.word_emb_dim, self.enc_lstm_dim, 1,
                                bidirectional=True, dropout=self.dpout_model)
        assert self.version in [1, 2]
        # V1 and V2 differ in their sentence boundary tokens, padding
        # handling for max pooling, and tokenizer.
        if self.version == 1:
            self.bos = '<s>'
            self.eos = '</s>'
            self.max_pad = True
            self.moses_tok = False
        elif self.version == 2:
            self.bos = '<p>'
            self.eos = '</p>'
            self.max_pad = False
            self.moses_tok = True
    def is_cuda(self):
        # either all weights are on cpu or they are on gpu
        return self.enc_lstm.bias_hh_l0.data.is_cuda
    def forward(self, sent_tuple):
        # sent_len: [max_len, ..., min_len] (bsize)
        # sent: (seqlen x bsize x worddim)
        sent, sent_len = sent_tuple
        # Sort by length (keep idx)
        sent_len_sorted, idx_sort = np.sort(sent_len)[::-1], np.argsort(-sent_len)
        sent_len_sorted = sent_len_sorted.copy()
        idx_unsort = np.argsort(idx_sort)
        idx_sort = torch.from_numpy(idx_sort).cuda() if self.is_cuda() \
            else torch.from_numpy(idx_sort)
        sent = sent.index_select(1, idx_sort)
        # Handling padding in Recurrent Networks
        sent_packed = nn.utils.rnn.pack_padded_sequence(sent, sent_len_sorted)
        sent_output = self.enc_lstm(sent_packed)[0]  # seqlen x batch x 2*nhid
        sent_output = nn.utils.rnn.pad_packed_sequence(sent_output)[0]
        # Un-sort by length
        idx_unsort = torch.from_numpy(idx_unsort).cuda() if self.is_cuda() \
            else torch.from_numpy(idx_unsort)
        sent_output = sent_output.index_select(1, idx_unsort)
        # Pooling
        if self.pool_type == "mean":
            sent_len = torch.FloatTensor(sent_len.copy()).unsqueeze(1).cuda()
            emb = torch.sum(sent_output, 0).squeeze(0)
            emb = emb / sent_len.expand_as(emb)
        elif self.pool_type == "max":
            # V2: mask out padded zero positions so they never win the max.
            if not self.max_pad:
                sent_output[sent_output == 0] = -1e9
            emb = torch.max(sent_output, 0)[0]
            if emb.ndimension() == 3:
                emb = emb.squeeze(0)
                assert emb.ndimension() == 2
        return emb
    def set_w2v_path(self, w2v_path):
        self.w2v_path = w2v_path
    def get_word_dict(self, sentences, tokenize=True):
        # create vocab of words
        word_dict = {}
        sentences = [s.split() if not tokenize else self.tokenize(s) for s in sentences]
        for sent in sentences:
            for word in sent:
                if word not in word_dict:
                    word_dict[word] = ''
        word_dict[self.bos] = ''
        word_dict[self.eos] = ''
        return word_dict
    def get_w2v(self, word_dict):
        assert hasattr(self, 'w2v_path'), 'w2v path not set'
        # create word_vec with w2v vectors
        word_vec = {}
        with open(self.w2v_path, encoding='utf-8') as f:
            for line in f:
                word, vec = line.split(' ', 1)
                if word in word_dict:
                    word_vec[word] = np.fromstring(vec, sep=' ')
        print('Found %s(/%s) words with w2v vectors' % (len(word_vec), len(word_dict)))
        return word_vec
    def get_w2v_k(self, K):
        assert hasattr(self, 'w2v_path'), 'w2v path not set'
        # create word_vec with k first w2v vectors
        k = 0
        word_vec = {}
        with open(self.w2v_path, encoding='utf-8') as f:
            for line in f:
                word, vec = line.split(' ', 1)
                if k <= K:
                    word_vec[word] = np.fromstring(vec, sep=' ')
                    k += 1
                # past the first K entries, only keep scanning for the
                # sentence boundary tokens, then stop.
                if k > K:
                    if word in [self.bos, self.eos]:
                        word_vec[word] = np.fromstring(vec, sep=' ')
                if k > K and all([w in word_vec for w in [self.bos, self.eos]]):
                    break
        return word_vec
    def build_vocab(self, sentences, tokenize=True):
        assert hasattr(self, 'w2v_path'), 'w2v path not set'
        word_dict = self.get_word_dict(sentences, tokenize)
        self.word_vec = self.get_w2v(word_dict)
        print('Vocab size : %s' % (len(self.word_vec)))
    # build w2v vocab with k most frequent words
    def build_vocab_k_words(self, K):
        assert hasattr(self, 'w2v_path'), 'w2v path not set'
        self.word_vec = self.get_w2v_k(K)
        print('Vocab size : %s' % (K))
    def update_vocab(self, sentences, tokenize=True):
        assert hasattr(self, 'w2v_path'), 'warning : w2v path not set'
        assert hasattr(self, 'word_vec'), 'build_vocab before updating it'
        word_dict = self.get_word_dict(sentences, tokenize)
        # keep only new words
        for word in self.word_vec:
            if word in word_dict:
                del word_dict[word]
        # udpate vocabulary
        if word_dict:
            new_word_vec = self.get_w2v(word_dict)
            self.word_vec.update(new_word_vec)
        else:
            new_word_vec = []
        print('New vocab size : %s (added %s words)'% (len(self.word_vec), len(new_word_vec)))
    def get_batch(self, batch):
        # sent in batch in decreasing order of lengths
        # batch: (bsize, max_len, word_dim)
        embed = np.zeros((len(batch[0]), len(batch), self.word_emb_dim))
        for i in range(len(batch)):
            for j in range(len(batch[i])):
                embed[j, i, :] = self.word_vec[batch[i][j]]
        return torch.FloatTensor(embed)
    def tokenize(self, s):
        from nltk.tokenize import word_tokenize
        if self.moses_tok:
            s = ' '.join(word_tokenize(s))
            s = s.replace(" n't ", "n 't ")  # HACK to get ~MOSES tokenization
            return s.split()
        else:
            return word_tokenize(s)
    def prepare_samples(self, sentences, bsize, tokenize, verbose):
        # Wrap every sentence with bos/eos tokens.
        sentences = [[self.bos] + s.split() + [self.eos] if not tokenize else
                     [self.bos] + self.tokenize(s) + [self.eos] for s in sentences]
        n_w = np.sum([len(x) for x in sentences])
        # filters words without w2v vectors
        for i in range(len(sentences)):
            s_f = [word for word in sentences[i] if word in self.word_vec]
            if not s_f:
                import warnings
                warnings.warn('No words in "%s" (idx=%s) have w2v vectors. \
                               Replacing by "</s>"..' % (sentences[i], i))
                s_f = [self.eos]
            sentences[i] = s_f
        lengths = np.array([len(s) for s in sentences])
        n_wk = np.sum(lengths)
        if verbose:
            print('Nb words kept : %s/%s (%.1f%s)' % (
                        n_wk, n_w, 100.0 * n_wk / n_w, '%'))
        # sort by decreasing length
        lengths, idx_sort = np.sort(lengths)[::-1], np.argsort(-lengths)
        sentences = np.array(sentences)[idx_sort]
        return sentences, lengths, idx_sort
    def encode(self, sentences, bsize=64, tokenize=True, verbose=False):
        tic = time.time()
        sentences, lengths, idx_sort = self.prepare_samples(
                        sentences, bsize, tokenize, verbose)
        embeddings = []
        for stidx in range(0, len(sentences), bsize):
            batch = self.get_batch(sentences[stidx:stidx + bsize])
            if self.is_cuda():
                batch = batch.cuda()
            with torch.no_grad():
                batch = self.forward((batch, lengths[stidx:stidx + bsize])).data.cpu().numpy()
            embeddings.append(batch)
        embeddings = np.vstack(embeddings)
        # unsort
        idx_unsort = np.argsort(idx_sort)
        embeddings = embeddings[idx_unsort]
        if verbose:
            print('Speed : %.1f sentences/s (%s mode, bsize=%s)' % (
                    len(embeddings)/(time.time()-tic),
                    'gpu' if self.is_cuda() else 'cpu', bsize))
        return embeddings
    def visualize(self, sent, tokenize=True):
        # Plot, per word, how often it supplies the max-pooled features.
        sent = sent.split() if not tokenize else self.tokenize(sent)
        sent = [[self.bos] + [word for word in sent if word in self.word_vec] + [self.eos]]
        if ' '.join(sent[0]) == '%s %s' % (self.bos, self.eos):
            import warnings
            warnings.warn('No words in "%s" have w2v vectors. Replacing \
                           by "%s %s"..' % (sent, self.bos, self.eos))
        batch = self.get_batch(sent)
        if self.is_cuda():
            batch = batch.cuda()
        output = self.enc_lstm(batch)[0]
        output, idxs = torch.max(output, 0)
        # output, idxs = output.squeeze(), idxs.squeeze()
        idxs = idxs.data.cpu().numpy()
        argmaxs = [np.sum((idxs == k)) for k in range(len(sent[0]))]
        # visualize model
        import matplotlib.pyplot as plt
        x = range(len(sent[0]))
        y = [100.0 * n / np.sum(argmaxs) for n in argmaxs]
        plt.xticks(x, sent[0], rotation=45)
        plt.bar(x, y)
        plt.ylabel('%')
        plt.title('Visualisation of words importance')
        plt.show()
        return output, idxs
| 9,875 | 36.12782 | 94 | py |
MCSE | MCSE-master/SentEval/examples/gensen.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
"""
Clone GenSen repo here: https://github.com/Maluuba/gensen.git
And follow instructions for loading the model used in batcher
"""
from __future__ import absolute_import, division, unicode_literals
import sys
import logging
# import GenSen package
from gensen import GenSen, GenSenSingle
# Set PATHs
PATH_TO_SENTEVAL = '../'
PATH_TO_DATA = '../data'
# import SentEval
sys.path.insert(0, PATH_TO_SENTEVAL)
import senteval
# SentEval prepare and batcher
def prepare(params, samples):
    """SentEval prepare hook -- GenSen needs no corpus-level preprocessing."""
    return None
def batcher(params, batch):
    """SentEval batcher: encode one batch of tokenized sentences with GenSen.

    Args:
        params: SentEval params object; ``params['gensen']`` holds the
            GenSen encoder (set below via ``params_senteval['gensen']``).
        batch: list of sentences, each a list of tokens (possibly empty).

    Returns:
        numpy array of sentence embeddings, one row per input sentence.
    """
    # Empty token lists would give the encoder nothing to pool; use '.'.
    batch = [' '.join(sent) if sent != [] else '.' for sent in batch]
    # Bug fix: the original called the undefined globals `gensen` and
    # `sentences`; use the encoder stored in params and the current batch.
    _, reps_h_t = params['gensen'].get_representation(
        batch, pool='last', return_numpy=True, tokenize=True
    )
    embeddings = reps_h_t
    return embeddings
# Load GenSen model (two single models combined into one multi-task encoder)
gensen_1 = GenSenSingle(
    model_folder='../data/models',
    filename_prefix='nli_large_bothskip',
    pretrained_emb='../data/embedding/glove.840B.300d.h5'
)
gensen_2 = GenSenSingle(
    model_folder='../data/models',
    filename_prefix='nli_large_bothskip_parse',
    pretrained_emb='../data/embedding/glove.840B.300d.h5'
)
gensen_encoder = GenSen(gensen_1, gensen_2)
# Smoke-test the combined encoder on a sample sentence.
# Bug fix: the original referenced the undefined names `gensen` and
# `sentences`, which raised NameError at import time.
reps_h, reps_h_t = gensen_encoder.get_representation(
    ['A man is playing a guitar.'], pool='last', return_numpy=True, tokenize=True
)
# Set params for SentEval
params_senteval = {'task_path': PATH_TO_DATA, 'usepytorch': True, 'kfold': 5}
params_senteval['classifier'] = {'nhid': 0, 'optim': 'rmsprop', 'batch_size': 128,
                                 'tenacity': 3, 'epoch_size': 2}
# Expose the encoder to batcher() through the shared params object.
params_senteval['gensen'] = gensen_encoder
# Set up logger
logging.basicConfig(format='%(asctime)s : %(message)s', level=logging.DEBUG)
if __name__ == "__main__":
    se = senteval.engine.SE(params_senteval, batcher, prepare)
    # Full SentEval suite: STS, classification, entailment and probing tasks.
    transfer_tasks = ['STS12', 'STS13', 'STS14', 'STS15', 'STS16',
                      'MR', 'CR', 'MPQA', 'SUBJ', 'SST2', 'SST5', 'TREC', 'MRPC',
                      'SICKEntailment', 'SICKRelatedness', 'STSBenchmark',
                      'Length', 'WordContent', 'Depth', 'TopConstituents',
                      'BigramShift', 'Tense', 'SubjNumber', 'ObjNumber',
                      'OddManOut', 'CoordinationInversion']
    results = se.eval(transfer_tasks)
    print(results)
| 2,429 | 31.4 | 82 | py |
MCSE | MCSE-master/SentEval/examples/skipthought.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from __future__ import absolute_import, division, unicode_literals
"""
Example of file for SkipThought in SentEval
"""
import logging
import sys
# Bug fix: `sys.setdefaultencoding` only exists on Python 2, and even there
# it is deleted from the `sys` namespace at interpreter startup, so the bare
# call in the original raised AttributeError on every interpreter.
# `reload(sys)` restores it (Python 2 only); Python 3 defaults to UTF-8.
if sys.version_info[0] == 2:
    reload(sys)  # noqa: F821 -- `reload` is a builtin on Python 2 only
    sys.setdefaultencoding('utf8')
# Set PATHs
PATH_TO_SENTEVAL = '../'
PATH_TO_DATA = '../data/senteval_data/'
PATH_TO_SKIPTHOUGHT = ''
assert PATH_TO_SKIPTHOUGHT != '', 'Download skipthought and set correct PATH'
# import skipthought and Senteval
sys.path.insert(0, PATH_TO_SKIPTHOUGHT)
import skipthoughts
sys.path.insert(0, PATH_TO_SENTEVAL)
import senteval
def prepare(params, samples):
    """SentEval prepare hook -- SkipThought needs no corpus-level setup."""
    return None
def batcher(params, batch):
    """SentEval batcher: encode a batch of tokenized sentences with SkipThought."""
    # Join tokens back into a sentence string; empty samples become '.'.
    # NOTE(review): `str(..., errors="ignore")` is Python-2-era decoding; on
    # Python 3, str() of a str with `errors=` raises TypeError. The intent was
    # presumably lossy unicode decoding of byte strings -- confirm and port.
    batch = [str(' '.join(sent), errors="ignore") if sent != [] else '.' for sent in batch]
    # use_eos=True appends the end-of-sentence token expected by the model.
    embeddings = skipthoughts.encode(params['encoder'], batch,
                                     verbose=False, use_eos=True)
    return embeddings
# Set params for SentEval
params_senteval = {'task_path': PATH_TO_DATA, 'usepytorch': True, 'kfold': 10, 'batch_size': 512}
params_senteval['classifier'] = {'nhid': 0, 'optim': 'adam', 'batch_size': 64,
                                 'tenacity': 5, 'epoch_size': 4}
# Set up logger
logging.basicConfig(format='%(asctime)s : %(message)s', level=logging.DEBUG)
if __name__ == "__main__":
    # Load SkipThought model and expose it to batcher() via params.
    params_senteval['encoder'] = skipthoughts.load_model()
    se = senteval.engine.SE(params_senteval, batcher, prepare)
    # Full SentEval suite: STS, classification, entailment and probing tasks.
    transfer_tasks = ['STS12', 'STS13', 'STS14', 'STS15', 'STS16',
                      'MR', 'CR', 'MPQA', 'SUBJ', 'SST2', 'SST5', 'TREC', 'MRPC',
                      'SICKEntailment', 'SICKRelatedness', 'STSBenchmark',
                      'Length', 'WordContent', 'Depth', 'TopConstituents',
                      'BigramShift', 'Tense', 'SubjNumber', 'ObjNumber',
                      'OddManOut', 'CoordinationInversion']
    results = se.eval(transfer_tasks)
    print(results)
| 2,048 | 32.048387 | 97 | py |
MCSE | MCSE-master/SentEval/senteval/engine.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
'''
Generic sentence evaluation scripts wrapper
'''
from __future__ import absolute_import, division, unicode_literals
from senteval import utils
from senteval.binary import CREval, MREval, MPQAEval, SUBJEval
from senteval.snli import SNLIEval
from senteval.trec import TRECEval
from senteval.sick import SICKEntailmentEval, SICKEval
from senteval.mrpc import MRPCEval
from senteval.sts import STS12Eval, STS13Eval, STS14Eval, STS15Eval, STS16Eval, STSBenchmarkEval, SICKRelatednessEval, STSBenchmarkFinetune
from senteval.sst import SSTEval
from senteval.rank import ImageCaptionRetrievalEval
from senteval.probing import *
class SE(object):
    """SentEval engine: wraps a user-supplied sentence encoder (`batcher`
    plus optional `prepare` callback) and dispatches it to the requested
    evaluation task(s).
    """
    def __init__(self, params, batcher, prepare=None):
        """Store the callbacks and fill in default hyper-parameters."""
        # parameters
        params = utils.dotdict(params)
        params.usepytorch = True if 'usepytorch' not in params else params.usepytorch
        params.seed = 1111 if 'seed' not in params else params.seed
        params.batch_size = 128 if 'batch_size' not in params else params.batch_size
        params.nhid = 0 if 'nhid' not in params else params.nhid
        params.kfold = 5 if 'kfold' not in params else params.kfold
        if 'classifier' not in params or not params['classifier']:
            params.classifier = {'nhid': 0}
        assert 'nhid' in params.classifier, 'Set number of hidden units in classifier config!!'
        self.params = params
        # batcher and prepare
        self.batcher = batcher
        # prepare defaults to a no-op when the encoder needs no fitting step
        self.prepare = prepare if prepare else lambda x, y: None
        # Names accepted by eval(); each maps to one evaluation class below.
        self.list_tasks = ['CR', 'MR', 'MPQA', 'SUBJ', 'SST2', 'SST5', 'TREC', 'MRPC',
                           'SICKRelatedness', 'SICKEntailment', 'STSBenchmark',
                           'SNLI', 'ImageCaptionRetrieval', 'STS12', 'STS13',
                           'STS14', 'STS15', 'STS16',
                           'Length', 'WordContent', 'Depth', 'TopConstituents',
                           'BigramShift', 'Tense', 'SubjNumber', 'ObjNumber',
                           'OddManOut', 'CoordinationInversion', 'SICKRelatedness-finetune', 'STSBenchmark-finetune', 'STSBenchmark-fix']
    def eval(self, name):
        """Evaluate on task `name`; accepts a single task name or a list.

        For a list, returns a dict mapping each task name to its results;
        for a single name, returns that task's result dict.
        """
        # evaluate on evaluation [name], either takes string or list of strings
        if (isinstance(name, list)):
            self.results = {x: self.eval(x) for x in name}
            return self.results
        tpath = self.params.task_path
        assert name in self.list_tasks, str(name) + ' not in ' + str(self.list_tasks)
        # Original SentEval tasks
        if name == 'CR':
            self.evaluation = CREval(tpath + '/downstream/CR', seed=self.params.seed)
        elif name == 'MR':
            self.evaluation = MREval(tpath + '/downstream/MR', seed=self.params.seed)
        elif name == 'MPQA':
            self.evaluation = MPQAEval(tpath + '/downstream/MPQA', seed=self.params.seed)
        elif name == 'SUBJ':
            self.evaluation = SUBJEval(tpath + '/downstream/SUBJ', seed=self.params.seed)
        elif name == 'SST2':
            self.evaluation = SSTEval(tpath + '/downstream/SST/binary', nclasses=2, seed=self.params.seed)
        elif name == 'SST5':
            self.evaluation = SSTEval(tpath + '/downstream/SST/fine', nclasses=5, seed=self.params.seed)
        elif name == 'TREC':
            self.evaluation = TRECEval(tpath + '/downstream/TREC', seed=self.params.seed)
        elif name == 'MRPC':
            self.evaluation = MRPCEval(tpath + '/downstream/MRPC', seed=self.params.seed)
        elif name == 'SICKRelatedness':
            self.evaluation = SICKRelatednessEval(tpath + '/downstream/SICK', seed=self.params.seed)
        elif name == 'STSBenchmark':
            self.evaluation = STSBenchmarkEval(tpath + '/downstream/STS/STSBenchmark', seed=self.params.seed)
        elif name == 'STSBenchmark-fix':
            self.evaluation = STSBenchmarkEval(tpath + '/downstream/STS/STSBenchmark-fix', seed=self.params.seed)
        elif name == 'STSBenchmark-finetune':
            self.evaluation = STSBenchmarkFinetune(tpath + '/downstream/STS/STSBenchmark', seed=self.params.seed)
        elif name == 'SICKRelatedness-finetune':
            self.evaluation = SICKEval(tpath + '/downstream/SICK', seed=self.params.seed)
        elif name == 'SICKEntailment':
            self.evaluation = SICKEntailmentEval(tpath + '/downstream/SICK', seed=self.params.seed)
        elif name == 'SNLI':
            self.evaluation = SNLIEval(tpath + '/downstream/SNLI', seed=self.params.seed)
        elif name in ['STS12', 'STS13', 'STS14', 'STS15', 'STS16']:
            # Resolve e.g. 'STS12' to the STS12Eval class defined in sts.py.
            fpath = name + '-en-test'
            self.evaluation = eval(name + 'Eval')(tpath + '/downstream/STS/' + fpath, seed=self.params.seed)
        elif name == 'ImageCaptionRetrieval':
            self.evaluation = ImageCaptionRetrievalEval(tpath + '/downstream/COCO', seed=self.params.seed)
        # Probing Tasks
        elif name == 'Length':
            self.evaluation = LengthEval(tpath + '/probing', seed=self.params.seed)
        elif name == 'WordContent':
            self.evaluation = WordContentEval(tpath + '/probing', seed=self.params.seed)
        elif name == 'Depth':
            self.evaluation = DepthEval(tpath + '/probing', seed=self.params.seed)
        elif name == 'TopConstituents':
            self.evaluation = TopConstituentsEval(tpath + '/probing', seed=self.params.seed)
        elif name == 'BigramShift':
            self.evaluation = BigramShiftEval(tpath + '/probing', seed=self.params.seed)
        elif name == 'Tense':
            self.evaluation = TenseEval(tpath + '/probing', seed=self.params.seed)
        elif name == 'SubjNumber':
            self.evaluation = SubjNumberEval(tpath + '/probing', seed=self.params.seed)
        elif name == 'ObjNumber':
            self.evaluation = ObjNumberEval(tpath + '/probing', seed=self.params.seed)
        elif name == 'OddManOut':
            self.evaluation = OddManOutEval(tpath + '/probing', seed=self.params.seed)
        elif name == 'CoordinationInversion':
            self.evaluation = CoordinationInversionEval(tpath + '/probing', seed=self.params.seed)
        self.params.current_task = name
        self.evaluation.do_prepare(self.params, self.prepare)
        self.results = self.evaluation.run(self.params, self.batcher)
        return self.results
| 6,525 | 49.2 | 139 | py |
MCSE | MCSE-master/SentEval/senteval/rank.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
'''
Image-Caption Retrieval with COCO dataset
'''
from __future__ import absolute_import, division, unicode_literals
import os
import sys
import logging
import numpy as np
try:
import cPickle as pickle
except ImportError:
import pickle
from senteval.tools.ranking import ImageSentenceRankingPytorch
class ImageCaptionRetrievalEval(object):
    """COCO image-caption retrieval task (text->image and image->text ranking)."""
    def __init__(self, task_path, seed=1111):
        logging.debug('***** Transfer task: Image Caption Retrieval *****\n\n')
        # Get captions and image features
        self.seed = seed
        train, dev, test = self.loadFile(task_path)
        self.coco_data = {'train': train, 'dev': dev, 'test': test}
    def do_prepare(self, params, prepare):
        """Hand every caption in the corpus to the user's prepare()."""
        samples = self.coco_data['train']['sent'] + \
            self.coco_data['dev']['sent'] + \
            self.coco_data['test']['sent']
        prepare(params, samples)
    def loadFile(self, fpath):
        """Load the train/valid/test pickles: 5 captions per image feature."""
        coco = {}
        for split in ['train', 'valid', 'test']:
            list_sent = []
            list_img_feat = []
            # Pickle loading differs between Python 2 and 3 (encoding arg).
            if sys.version_info < (3, 0):
                with open(os.path.join(fpath, split + '.pkl')) as f:
                    cocodata = pickle.load(f)
            else:
                with open(os.path.join(fpath, split + '.pkl'), 'rb') as f:
                    cocodata = pickle.load(f, encoding='latin1')
            for imgkey in range(len(cocodata['features'])):
                assert len(cocodata['image_to_caption_ids'][imgkey]) >= 5, \
                       cocodata['image_to_caption_ids'][imgkey]
                # keep exactly 5 captions per image so alignment stays regular
                for captkey in cocodata['image_to_caption_ids'][imgkey][0:5]:
                    sent = cocodata['captions'][captkey]['cleaned_caption']
                    sent += ' .'  # add punctuation to end of sentence in COCO
                    list_sent.append(sent.encode('utf-8').split())
                    list_img_feat.append(cocodata['features'][imgkey])
            assert len(list_sent) == len(list_img_feat) and \
                len(list_sent) % 5 == 0
            list_img_feat = np.array(list_img_feat).astype('float32')
            coco[split] = {'sent': list_sent, 'imgfeat': list_img_feat}
        return coco['train'], coco['valid'], coco['test']
    def run(self, params, batcher):
        """Embed all captions, then train/evaluate the ranking model."""
        coco_embed = {'train': {'sentfeat': [], 'imgfeat': []},
                      'dev': {'sentfeat': [], 'imgfeat': []},
                      'test': {'sentfeat': [], 'imgfeat': []}}
        for key in self.coco_data:
            logging.info('Computing embedding for {0}'.format(key))
            # Sort to reduce padding
            # NOTE(review): np.sort/np.argsort on the sentence array sorts
            # lexicographically, not by sentence length -- confirm intent.
            self.coco_data[key]['sent'] = np.array(self.coco_data[key]['sent'])
            self.coco_data[key]['sent'], idx_sort = np.sort(self.coco_data[key]['sent']), np.argsort(self.coco_data[key]['sent'])
            idx_unsort = np.argsort(idx_sort)
            coco_embed[key]['X'] = []
            nsent = len(self.coco_data[key]['sent'])
            for ii in range(0, nsent, params.batch_size):
                batch = self.coco_data[key]['sent'][ii:ii + params.batch_size]
                embeddings = batcher(params, batch)
                coco_embed[key]['sentfeat'].append(embeddings)
            # unsort so caption embeddings stay aligned with image features
            coco_embed[key]['sentfeat'] = np.vstack(coco_embed[key]['sentfeat'])[idx_unsort]
            coco_embed[key]['imgfeat'] = np.array(self.coco_data[key]['imgfeat'])
            logging.info('Computed {0} embeddings'.format(key))
        config = {'seed': self.seed, 'projdim': 1000, 'margin': 0.2}
        clf = ImageSentenceRankingPytorch(train=coco_embed['train'],
                                          valid=coco_embed['dev'],
                                          test=coco_embed['test'],
                                          config=config)
        bestdevscore, r1_i2t, r5_i2t, r10_i2t, medr_i2t, \
            r1_t2i, r5_t2i, r10_t2i, medr_t2i = clf.run()
        logging.debug("\nTest scores | Image to text: \
            {0}, {1}, {2}, {3}".format(r1_i2t, r5_i2t, r10_i2t, medr_i2t))
        logging.debug("Test scores | Text to image: \
            {0}, {1}, {2}, {3}\n".format(r1_t2i, r5_t2i, r10_t2i, medr_t2i))
        return {'devacc': bestdevscore,
                'acc': [(r1_i2t, r5_i2t, r10_i2t, medr_i2t),
                        (r1_t2i, r5_t2i, r10_t2i, medr_t2i)],
                'ndev': len(coco_embed['dev']['sentfeat']),
                'ntest': len(coco_embed['test']['sentfeat'])}
| 4,643 | 41.605505 | 129 | py |
MCSE | MCSE-master/SentEval/senteval/snli.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
'''
SNLI - Entailment
'''
from __future__ import absolute_import, division, unicode_literals
import codecs
import os
import io
import copy
import logging
import numpy as np
from senteval.tools.validation import SplitClassifier
class SNLIEval(object):
    """SNLI entailment transfer task (3-way premise/hypothesis classification)."""
    def __init__(self, taskpath, seed=1111):
        logging.debug('***** Transfer task : SNLI Entailment*****\n\n')
        self.seed = seed
        train1 = self.loadFile(os.path.join(taskpath, 's1.train'))
        train2 = self.loadFile(os.path.join(taskpath, 's2.train'))
        trainlabels = io.open(os.path.join(taskpath, 'labels.train'),
                              encoding='utf-8').read().splitlines()
        valid1 = self.loadFile(os.path.join(taskpath, 's1.dev'))
        valid2 = self.loadFile(os.path.join(taskpath, 's2.dev'))
        validlabels = io.open(os.path.join(taskpath, 'labels.dev'),
                              encoding='utf-8').read().splitlines()
        test1 = self.loadFile(os.path.join(taskpath, 's1.test'))
        test2 = self.loadFile(os.path.join(taskpath, 's2.test'))
        testlabels = io.open(os.path.join(taskpath, 'labels.test'),
                             encoding='utf-8').read().splitlines()
        # sort data (by s2 first) to reduce padding
        sorted_train = sorted(zip(train2, train1, trainlabels),
                              key=lambda z: (len(z[0]), len(z[1]), z[2]))
        train2, train1, trainlabels = map(list, zip(*sorted_train))
        sorted_valid = sorted(zip(valid2, valid1, validlabels),
                              key=lambda z: (len(z[0]), len(z[1]), z[2]))
        valid2, valid1, validlabels = map(list, zip(*sorted_valid))
        sorted_test = sorted(zip(test2, test1, testlabels),
                             key=lambda z: (len(z[0]), len(z[1]), z[2]))
        test2, test1, testlabels = map(list, zip(*sorted_test))
        self.samples = train1 + train2 + valid1 + valid2 + test1 + test2
        self.data = {'train': (train1, train2, trainlabels),
                     'valid': (valid1, valid2, validlabels),
                     'test': (test1, test2, testlabels)
                     }
    def do_prepare(self, params, prepare):
        """Hand every sentence of every split to the user's prepare()."""
        return prepare(params, self.samples)
    def loadFile(self, fpath):
        """Read one whitespace-tokenized sentence per line (latin-1 encoded)."""
        with codecs.open(fpath, 'rb', 'latin-1') as f:
            return [line.split() for line in
                    f.read().splitlines()]
    def run(self, params, batcher):
        """Encode premise/hypothesis pairs and train the 3-way classifier."""
        self.X, self.y = {}, {}
        dico_label = {'entailment': 0, 'neutral': 1, 'contradiction': 2}
        for key in self.data:
            if key not in self.X:
                self.X[key] = []
            if key not in self.y:
                self.y[key] = []
            input1, input2, mylabels = self.data[key]
            enc_input = []
            n_labels = len(mylabels)
            for ii in range(0, n_labels, params.batch_size):
                batch1 = input1[ii:ii + params.batch_size]
                batch2 = input2[ii:ii + params.batch_size]
                if len(batch1) == len(batch2) and len(batch1) > 0:
                    enc1 = batcher(params, batch1)
                    enc2 = batcher(params, batch2)
                    # pair features: [u; v; u*v; |u-v|]
                    enc_input.append(np.hstack((enc1, enc2, enc1 * enc2,
                                                np.abs(enc1 - enc2))))
                # NOTE(review): this condition reduces to `ii % 20000 == 0`;
                # presumably meant to log every N batches -- confirm.
                if (ii*params.batch_size) % (20000*params.batch_size) == 0:
                    logging.info("PROGRESS (encoding): %.2f%%" %
                                 (100 * ii / n_labels))
            self.X[key] = np.vstack(enc_input)
            self.y[key] = [dico_label[y] for y in mylabels]
        config = {'nclasses': 3, 'seed': self.seed,
                  'usepytorch': params.usepytorch,
                  'cudaEfficient': True,
                  'nhid': params.nhid, 'noreg': True}
        config_classifier = copy.deepcopy(params.classifier)
        config_classifier['max_epoch'] = 15
        config_classifier['epoch_size'] = 1
        config['classifier'] = config_classifier
        clf = SplitClassifier(self.X, self.y, config)
        devacc, testacc = clf.run()
        logging.debug('Dev acc : {0} Test acc : {1} for SNLI\n'
                      .format(devacc, testacc))
        return {'devacc': devacc, 'acc': testacc,
                'ndev': len(self.data['valid'][0]),
                'ntest': len(self.data['test'][0])}
| 4,577 | 39.157895 | 75 | py |
MCSE | MCSE-master/SentEval/senteval/utils.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from __future__ import absolute_import, division, unicode_literals
import numpy as np
import re
import inspect
from torch import optim
def create_dictionary(sentences):
    """Build a vocabulary from tokenized sentences, ranked by frequency.

    Special tokens <s>, </s> and <p> receive artificially huge counts so
    they always occupy the first ids.

    Returns:
        (id2word, word2id): id-ordered token list and token->id mapping.
    """
    counts = {}
    for sentence in sentences:
        for token in sentence:
            counts[token] = counts.get(token, 0) + 1
    counts['<s>'] = 1e9 + 4
    counts['</s>'] = 1e9 + 3
    counts['<p>'] = 1e9 + 2
    # counts['<UNK>'] = 1e9 + 1  (kept disabled, as in the original)
    ranked = sorted(counts.items(), key=lambda kv: -kv[1])  # inverse sort
    id2word = []
    word2id = {}
    for idx, (token, _) in enumerate(ranked):
        id2word.append(token)
        word2id[token] = idx
    return id2word, word2id
def cosine(u, v):
    """Cosine similarity between two 1-D vectors (dot product over norms)."""
    denom = np.linalg.norm(u) * np.linalg.norm(v)
    return np.dot(u, v) / denom
class dotdict(dict):
    """dict subclass allowing attribute-style access: d.key == d['key'].

    Missing attributes resolve to None (mirroring dict.get); deleting an
    absent attribute raises KeyError (mirroring dict.__delitem__).
    """
    def __getattr__(self, name):
        # only called when normal attribute lookup fails
        return self.get(name)

    def __setattr__(self, name, value):
        self[name] = value

    def __delattr__(self, name):
        del self[name]
def get_optimizer(s):
    """
    Parse optimizer parameters.
    Input should be of the form:
        - "sgd,lr=0.01"
        - "adagrad,lr=0.1,lr_decay=0.05"

    Returns:
        (optim_fn, optim_params): the torch.optim class and a dict of
        float hyper-parameters to construct it with.

    Raises:
        Exception: if the method name is unknown or a parameter is not
        accepted by the chosen optimizer.
    """
    if "," in s:
        method = s[:s.find(',')]
        optim_params = {}
        for x in s[s.find(',') + 1:].split(','):
            split = x.split('=')
            assert len(split) == 2
            # every hyper-parameter value must be a plain decimal number
            assert re.match(r"^[+-]?(\d+(\.\d*)?|\.\d+)$", split[1]) is not None
            optim_params[split[0]] = float(split[1])
    else:
        method = s
        optim_params = {}

    if method == 'adadelta':
        optim_fn = optim.Adadelta
    elif method == 'adagrad':
        optim_fn = optim.Adagrad
    elif method == 'adam':
        optim_fn = optim.Adam
    elif method == 'adamax':
        optim_fn = optim.Adamax
    elif method == 'asgd':
        optim_fn = optim.ASGD
    elif method == 'rmsprop':
        optim_fn = optim.RMSprop
    elif method == 'rprop':
        optim_fn = optim.Rprop
    elif method == 'sgd':
        optim_fn = optim.SGD
        # SGD has no usable default learning rate; require it explicitly
        assert 'lr' in optim_params
    else:
        raise Exception('Unknown optimization method: "%s"' % method)

    # check that we give good parameters to the optimizer
    # (inspect.getargspec was removed in Python 3.11; getfullargspec is the
    # drop-in replacement and returns the same positional args in field 0)
    expected_args = inspect.getfullargspec(optim_fn.__init__)[0]
    assert expected_args[:2] == ['self', 'params']
    if not all(k in expected_args[2:] for k in optim_params.keys()):
        raise Exception('Unexpected parameters: expected "%s", got "%s"' % (
            str(expected_args[2:]), str(optim_params.keys())))

    return optim_fn, optim_params
| 2,713 | 27.270833 | 79 | py |
MCSE | MCSE-master/SentEval/senteval/binary.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
'''
Binary classifier and corresponding datasets : MR, CR, SUBJ, MPQA
'''
from __future__ import absolute_import, division, unicode_literals
import io
import os
import numpy as np
import logging
from senteval.tools.validation import InnerKFoldClassifier
class BinaryClassifierEval(object):
    """Shared driver for the binary classification tasks (CR, MR, SUBJ, MPQA)."""
    def __init__(self, pos, neg, seed=1111):
        self.seed = seed
        # label 1 for positive samples, 0 for negative ones
        self.samples, self.labels = pos + neg, [1] * len(pos) + [0] * len(neg)
        self.n_samples = len(self.samples)
    def do_prepare(self, params, prepare):
        # prepare is given the whole text
        return prepare(params, self.samples)
        # prepare puts everything it outputs in "params" : params.word2id etc
        # Those output will be further used by "batcher".
    def loadFile(self, fpath):
        """Read one whitespace-tokenized sentence per line (latin-1 encoded)."""
        with io.open(fpath, 'r', encoding='latin-1') as f:
            return [line.split() for line in f.read().splitlines()]
    def run(self, params, batcher):
        """Embed all samples with `batcher`, then run inner k-fold classification."""
        enc_input = []
        # Sort to reduce padding
        sorted_corpus = sorted(zip(self.samples, self.labels),
                               key=lambda z: (len(z[0]), z[1]))
        sorted_samples = [x for (x, y) in sorted_corpus]
        sorted_labels = [y for (x, y) in sorted_corpus]
        logging.info('Generating sentence embeddings')
        for ii in range(0, self.n_samples, params.batch_size):
            batch = sorted_samples[ii:ii + params.batch_size]
            embeddings = batcher(params, batch)
            enc_input.append(embeddings)
        enc_input = np.vstack(enc_input)
        logging.info('Generated sentence embeddings')
        config = {'nclasses': 2, 'seed': self.seed,
                  'usepytorch': params.usepytorch,
                  'classifier': params.classifier,
                  'nhid': params.nhid, 'kfold': params.kfold}
        clf = InnerKFoldClassifier(enc_input, np.array(sorted_labels), config)
        devacc, testacc = clf.run()
        logging.debug('Dev acc : {0} Test acc : {1}\n'.format(devacc, testacc))
        return {'devacc': devacc, 'acc': testacc, 'ndev': self.n_samples,
                'ntest': self.n_samples}
class CREval(BinaryClassifierEval):
    """Customer Review (CR) binary sentiment task."""
    def __init__(self, task_path, seed=1111):
        logging.debug('***** Transfer task : CR *****\n\n')
        pos = self.loadFile(os.path.join(task_path, 'custrev.pos'))
        neg = self.loadFile(os.path.join(task_path, 'custrev.neg'))
        # Name the class explicitly: super(self.__class__, self) recurses
        # infinitely if this class is ever subclassed.
        super(CREval, self).__init__(pos, neg, seed)
class MREval(BinaryClassifierEval):
    """Movie Review (MR) binary sentiment task."""
    def __init__(self, task_path, seed=1111):
        logging.debug('***** Transfer task : MR *****\n\n')
        pos = self.loadFile(os.path.join(task_path, 'rt-polarity.pos'))
        neg = self.loadFile(os.path.join(task_path, 'rt-polarity.neg'))
        # Name the class explicitly: super(self.__class__, self) recurses
        # infinitely if this class is ever subclassed.
        super(MREval, self).__init__(pos, neg, seed)
class SUBJEval(BinaryClassifierEval):
    """Subjectivity (SUBJ) binary classification task (objective vs subjective)."""
    def __init__(self, task_path, seed=1111):
        logging.debug('***** Transfer task : SUBJ *****\n\n')
        obj = self.loadFile(os.path.join(task_path, 'subj.objective'))
        subj = self.loadFile(os.path.join(task_path, 'subj.subjective'))
        # Name the class explicitly: super(self.__class__, self) recurses
        # infinitely if this class is ever subclassed.
        # Note: objective samples are passed first, so they get label 1.
        super(SUBJEval, self).__init__(obj, subj, seed)
class MPQAEval(BinaryClassifierEval):
    """MPQA opinion-polarity binary classification task."""
    def __init__(self, task_path, seed=1111):
        logging.debug('***** Transfer task : MPQA *****\n\n')
        pos = self.loadFile(os.path.join(task_path, 'mpqa.pos'))
        neg = self.loadFile(os.path.join(task_path, 'mpqa.neg'))
        # Name the class explicitly: super(self.__class__, self) recurses
        # infinitely if this class is ever subclassed.
        super(MPQAEval, self).__init__(pos, neg, seed)
| 3,712 | 38.924731 | 79 | py |
MCSE | MCSE-master/SentEval/senteval/mrpc.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
'''
MRPC : Microsoft Research Paraphrase (detection) Corpus
'''
from __future__ import absolute_import, division, unicode_literals
import os
import logging
import numpy as np
import io
from senteval.tools.validation import KFoldClassifier
from sklearn.metrics import f1_score
class MRPCEval(object):
    """Microsoft Research Paraphrase Corpus: binary paraphrase detection."""
    def __init__(self, task_path, seed=1111):
        logging.info('***** Transfer task : MRPC *****\n\n')
        self.seed = seed
        train = self.loadFile(os.path.join(task_path,
                              'msr_paraphrase_train.txt'))
        test = self.loadFile(os.path.join(task_path,
                             'msr_paraphrase_test.txt'))
        self.mrpc_data = {'train': train, 'test': test}
    def do_prepare(self, params, prepare):
        # TODO : Should we separate samples in "train, test"?
        samples = self.mrpc_data['train']['X_A'] + \
                  self.mrpc_data['train']['X_B'] + \
                  self.mrpc_data['test']['X_A'] + self.mrpc_data['test']['X_B']
        return prepare(params, samples)
    def loadFile(self, fpath):
        """Parse the tab-separated MRPC file; columns 3/4 are the sentence pair,
        column 0 the paraphrase label."""
        mrpc_data = {'X_A': [], 'X_B': [], 'y': []}
        with io.open(fpath, 'r', encoding='utf-8') as f:
            for line in f:
                text = line.strip().split('\t')
                mrpc_data['X_A'].append(text[3].split())
                mrpc_data['X_B'].append(text[4].split())
                mrpc_data['y'].append(text[0])
        # slicing [1:] drops the header row
        mrpc_data['X_A'] = mrpc_data['X_A'][1:]
        mrpc_data['X_B'] = mrpc_data['X_B'][1:]
        mrpc_data['y'] = [int(s) for s in mrpc_data['y'][1:]]
        return mrpc_data
    def run(self, params, batcher):
        """Embed both sentences of each pair, then train a k-fold classifier
        on [|a-b|; a*b] pair features."""
        mrpc_embed = {'train': {}, 'test': {}}
        for key in self.mrpc_data:
            logging.info('Computing embedding for {0}'.format(key))
            # Sort to reduce padding
            text_data = {}
            sorted_corpus = sorted(zip(self.mrpc_data[key]['X_A'],
                                       self.mrpc_data[key]['X_B'],
                                       self.mrpc_data[key]['y']),
                                   key=lambda z: (len(z[0]), len(z[1]), z[2]))
            text_data['A'] = [x for (x, y, z) in sorted_corpus]
            text_data['B'] = [y for (x, y, z) in sorted_corpus]
            text_data['y'] = [z for (x, y, z) in sorted_corpus]
            for txt_type in ['A', 'B']:
                mrpc_embed[key][txt_type] = []
                for ii in range(0, len(text_data['y']), params.batch_size):
                    batch = text_data[txt_type][ii:ii + params.batch_size]
                    embeddings = batcher(params, batch)
                    mrpc_embed[key][txt_type].append(embeddings)
                mrpc_embed[key][txt_type] = np.vstack(mrpc_embed[key][txt_type])
            mrpc_embed[key]['y'] = np.array(text_data['y'])
            logging.info('Computed {0} embeddings'.format(key))
        # Train
        trainA = mrpc_embed['train']['A']
        trainB = mrpc_embed['train']['B']
        trainF = np.c_[np.abs(trainA - trainB), trainA * trainB]
        trainY = mrpc_embed['train']['y']
        # Test
        testA = mrpc_embed['test']['A']
        testB = mrpc_embed['test']['B']
        testF = np.c_[np.abs(testA - testB), testA * testB]
        testY = mrpc_embed['test']['y']
        config = {'nclasses': 2, 'seed': self.seed,
                  'usepytorch': params.usepytorch,
                  'classifier': params.classifier,
                  'nhid': params.nhid, 'kfold': params.kfold}
        clf = KFoldClassifier(train={'X': trainF, 'y': trainY},
                              test={'X': testF, 'y': testY}, config=config)
        devacc, testacc, yhat = clf.run()
        testf1 = round(100*f1_score(testY, yhat), 2)
        logging.debug('Dev acc : {0} Test acc {1}; Test F1 {2} for MRPC.\n'
                      .format(devacc, testacc, testf1))
        return {'devacc': devacc, 'acc': testacc, 'f1': testf1,
                'ndev': len(trainA), 'ntest': len(testA)}
| 4,202 | 39.028571 | 80 | py |
MCSE | MCSE-master/SentEval/senteval/sts.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
'''
STS-{2012,2013,2014,2015,2016} (unsupervised) and
STS-benchmark (supervised) tasks
'''
from __future__ import absolute_import, division, unicode_literals
import os
import io
import numpy as np
import logging
import torch
from torch.nn.functional import normalize
from scipy.stats import spearmanr, pearsonr
from senteval.utils import cosine
from senteval.sick import SICKEval
# Question: normalization before evaluation?
def align_loss(x, y, alpha=2):
    """Alignment loss: mean of ||x_i - y_i||^alpha over L2-normalized rows.

    Rows of `x` and `y` are paired positives. Returns the int 0 when either
    input is empty (any zero dimension), matching the original behavior.
    """
    if 0 in (min(x.shape), min(y.shape)):
        return 0
    x_unit = normalize(x, p=2, dim=1)
    y_unit = normalize(y, p=2, dim=1)
    pair_dists = (x_unit - y_unit).norm(p=2, dim=1)
    return pair_dists.pow(alpha).mean()
def uniform_loss(x, t=2):
    """Uniformity loss: log of the mean Gaussian potential over all row pairs.

    Rows are L2-normalized first; torch.pdist computes the pairwise
    Euclidean distances. Returns the int 0 for empty input, matching the
    original behavior.
    """
    if min(x.shape) == 0:
        return 0
    x_unit = normalize(x, p=2, dim=1)
    sq_dists = torch.pdist(x_unit, p=2).pow(2)
    return sq_dists.mul(-t).exp().mean().log()
class STSEval(object):
    """Shared driver for the unsupervised STS tasks: scores sentence pairs by
    embedding similarity and reports Pearson/Spearman correlation with the
    gold scores, plus alignment/uniformity diagnostics."""
    def loadFile(self, fpath):
        """Load every subset listed in self.datasets (input + gold scores)."""
        self.data = {}
        self.samples = []
        for dataset in self.datasets:
            sent1, sent2 = zip(*[l.split("\t") for l in
                                 io.open(fpath + '/STS.input.%s.txt' % dataset,
                                         encoding='utf8').read().splitlines()])
            raw_scores = np.array([x for x in
                                   io.open(fpath + '/STS.gs.%s.txt' % dataset,
                                           encoding='utf8')
                                   .read().splitlines()])
            # pairs without a gold score are dropped
            not_empty_idx = raw_scores != ''
            gs_scores = [float(x) for x in raw_scores[not_empty_idx]]
            sent1 = np.array([s.split() for s in sent1])[not_empty_idx]
            sent2 = np.array([s.split() for s in sent2])[not_empty_idx]
            # sort data by length to minimize padding in batcher
            sorted_data = sorted(zip(sent1, sent2, gs_scores),
                                 key=lambda z: (len(z[0]), len(z[1]), z[2]))
            sent1, sent2, gs_scores = map(list, zip(*sorted_data))
            self.data[dataset] = (sent1, sent2, gs_scores)
            self.samples += sent1 + sent2
    def do_prepare(self, params, prepare):
        if 'similarity' in params:
            self.similarity = params.similarity
        else:  # Default similarity is cosine
            self.similarity = lambda s1, s2: np.nan_to_num(cosine(np.nan_to_num(s1), np.nan_to_num(s2)))
        return prepare(params, self.samples)
    def run(self, params, batcher):
        """Embed all pairs, score them, and aggregate per-subset and overall
        correlations together with alignment/uniformity losses."""
        results = {}
        all_sys_scores = []
        all_gs_scores = []
        ################# newly added
        all_loss_align = []
        all_loss_uniform = []
        #################
        for dataset in self.datasets:
            loss_align = []
            loss_uniform = []
            sys_scores = []
            input1, input2, gs_scores = self.data[dataset]
            for ii in range(0, len(gs_scores), params.batch_size):
                batch1 = input1[ii:ii + params.batch_size]
                batch2 = input2[ii:ii + params.batch_size]
                batch_gs_scores = gs_scores[ii:ii + params.batch_size]  # newly added
                # we assume get_batch already throws out the faulty ones
                if len(batch1) == len(batch2) and len(batch1) > 0:
                    enc1 = batcher(params, batch1)
                    enc2 = batcher(params, batch2)
                    ################# newly added
                    # pairs with gold score >= 4.0 are treated as positives
                    # for the alignment loss
                    pos_indices = [i for i in range(len(batch_gs_scores)) if batch_gs_scores[i] >= 4.0]
                    enc1_pos = enc1[pos_indices]
                    enc2_pos = enc2[pos_indices]
                    loss1 = align_loss(enc1_pos, enc2_pos)
                    loss2 = uniform_loss(torch.cat((enc1, enc2), dim=0))
                    loss_align.append(loss1)
                    loss_uniform.append(loss2)
                    #################
                    for kk in range(enc2.shape[0]):
                        sys_score = self.similarity(enc1[kk], enc2[kk])
                        sys_scores.append(sys_score)
            all_sys_scores.extend(sys_scores)
            all_gs_scores.extend(gs_scores)
            all_loss_align.extend(loss_align)
            all_loss_uniform.extend(loss_uniform)
            results[dataset] = {'pearson': pearsonr(sys_scores, gs_scores),
                                'spearman': spearmanr(sys_scores, gs_scores),
                                'nsamples': len(sys_scores),
                                'align_loss': np.mean(loss_align, dtype='float64'),  # newly added
                                'uniform_loss': np.mean(loss_uniform, dtype='float64')}  # newly added
            logging.debug('%s : pearson = %.4f, spearman = %.4f, align_loss = %.4f, uniform_loss = %.4f' %
                          (dataset, results[dataset]['pearson'][0],
                           results[dataset]['spearman'][0], results[dataset]['align_loss'],
                           results[dataset]['uniform_loss']))
        # aggregate: unweighted mean, nsamples-weighted mean, and metrics
        # computed over the concatenation of every subset ('all')
        weights = [results[dset]['nsamples'] for dset in results.keys()]
        list_prs = np.array([results[dset]['pearson'][0] for
                            dset in results.keys()])
        list_spr = np.array([results[dset]['spearman'][0] for
                            dset in results.keys()])
        list_align = np.array([results[dset]['align_loss'] for
                              dset in results.keys()])
        list_uniform = np.array([results[dset]['uniform_loss'] for
                                dset in results.keys()])
        avg_pearson = np.average(list_prs)
        avg_spearman = np.average(list_spr)
        avg_align = np.average(list_align)
        avg_uniform = np.average(list_uniform)
        wavg_pearson = np.average(list_prs, weights=weights)
        wavg_spearman = np.average(list_spr, weights=weights)
        wavg_align = np.average(list_align, weights=weights)
        wavg_uniform = np.average(list_uniform, weights=weights)
        all_pearson = pearsonr(all_sys_scores, all_gs_scores)
        all_spearman = spearmanr(all_sys_scores, all_gs_scores)
        all_align = np.mean(all_loss_align)
        all_uniform = np.mean(all_loss_uniform)
        results['all'] = {'pearson': {'all': all_pearson[0],
                                      'mean': avg_pearson,
                                      'wmean': wavg_pearson},
                          'spearman': {'all': all_spearman[0],
                                       'mean': avg_spearman,
                                       'wmean': wavg_spearman},
                          'align_loss': {'all': all_align,
                                         'mean': avg_align,
                                         'wmean': wavg_align},
                          'uniform_loss': {'all': all_uniform,
                                           'mean': avg_uniform,
                                           'wmean': wavg_uniform}}
        logging.debug('ALL : Pearson = %.4f, \
            Spearman = %.4f, align_loss = %.4f, uniform_loss = %.4f' % (all_pearson[0], all_spearman[0], all_align, all_uniform))
        logging.debug('ALL (weighted average) : Pearson = %.4f, \
            Spearman = %.4f, align_loss = %.4f, uniform_loss = %.4f' % (wavg_pearson, wavg_spearman, wavg_align, wavg_uniform))
        logging.debug('ALL (average) : Pearson = %.4f, \
            Spearman = %.4f, align_loss = %.4f, uniform_loss = %.4f\n' % (avg_pearson, avg_spearman, avg_align, avg_uniform))
        return results
class STS12Eval(STSEval):
    """SemEval-2012 STS task (five subtasks); loading and scoring are
    inherited from STSEval."""

    def __init__(self, taskpath, seed=1111):
        logging.debug('***** Transfer task : STS12 *****\n\n')
        self.seed = seed
        subtasks = ['MSRpar', 'MSRvid', 'SMTeuroparl',
                    'surprise.OnWN', 'surprise.SMTnews']
        self.datasets = subtasks
        self.loadFile(taskpath)
class STS13Eval(STSEval):
    """SemEval-2013 STS task; loading and scoring are inherited from STSEval.

    The "SMT" subtask is not included here due to a LICENSE issue.
    """

    def __init__(self, taskpath, seed=1111):
        logging.debug('***** Transfer task : STS13 (-SMT) *****\n\n')
        self.seed = seed
        subtasks = ['FNWN', 'headlines', 'OnWN']
        self.datasets = subtasks
        self.loadFile(taskpath)
class STS14Eval(STSEval):
    """SemEval-2014 STS task (six subtasks); loading and scoring are
    inherited from STSEval."""

    def __init__(self, taskpath, seed=1111):
        logging.debug('***** Transfer task : STS14 *****\n\n')
        self.seed = seed
        subtasks = ['deft-forum', 'deft-news', 'headlines',
                    'images', 'OnWN', 'tweet-news']
        self.datasets = subtasks
        self.loadFile(taskpath)
class STS15Eval(STSEval):
    """SemEval-2015 STS task (five subtasks); loading and scoring are
    inherited from STSEval."""

    def __init__(self, taskpath, seed=1111):
        logging.debug('***** Transfer task : STS15 *****\n\n')
        self.seed = seed
        subtasks = ['answers-forums', 'answers-students',
                    'belief', 'headlines', 'images']
        self.datasets = subtasks
        self.loadFile(taskpath)
class STS16Eval(STSEval):
    """SemEval-2016 STS task (five subtasks); loading and scoring are
    inherited from STSEval."""

    def __init__(self, taskpath, seed=1111):
        logging.debug('***** Transfer task : STS16 *****\n\n')
        self.seed = seed
        subtasks = ['answer-answer', 'headlines', 'plagiarism',
                    'postediting', 'question-question']
        self.datasets = subtasks
        self.loadFile(taskpath)
class STSBenchmarkEval(STSEval):
    """STS-Benchmark evaluated with the unsupervised STS protocol
    (cosine similarity vs. gold scores); scoring is inherited from STSEval."""

    def __init__(self, task_path, seed=1111):
        logging.debug('\n\n***** Transfer task : STSBenchmark*****\n\n')
        self.seed = seed
        self.samples = []
        self.datasets = ['train', 'dev', 'test']
        splits = {}
        for name, fname in zip(self.datasets,
                               ['sts-train.csv', 'sts-dev.csv', 'sts-test.csv']):
            splits[name] = self.loadFile(os.path.join(task_path, fname))
        self.data = splits

    def loadFile(self, fpath):
        """Parse one tab-separated STS-B split into (sentences1, sentences2,
        scores); sentences are whitespace-tokenized, scores are floats."""
        sents_a, sents_b, scores = [], [], []
        with io.open(fpath, 'r', encoding='utf-8') as f:
            for line in f:
                fields = line.strip().split('\t')
                sents_a.append(fields[5].split())   # sentence 1
                sents_b.append(fields[6].split())   # sentence 2
                scores.append(float(fields[4]))     # gold similarity score
        # Collect every sentence so do_prepare can build a vocabulary.
        self.samples += sents_a + sents_b
        return (sents_a, sents_b, scores)
class STSBenchmarkFinetune(SICKEval):
    """STS-Benchmark evaluated with the SICK-style supervised protocol
    (a trained regressor on pair features); training is inherited from
    SICKEval, only file parsing differs."""

    def __init__(self, task_path, seed=1111):
        logging.debug('\n\n***** Transfer task : STSBenchmark*****\n\n')
        self.seed = seed
        self.sick_data = {
            'train': self.loadFile(os.path.join(task_path, 'sts-train.csv')),
            'dev': self.loadFile(os.path.join(task_path, 'sts-dev.csv')),
            'test': self.loadFile(os.path.join(task_path, 'sts-test.csv')),
        }

    def loadFile(self, fpath):
        """Parse one tab-separated STS-B split into the SICK-style dict
        {'X_A', 'X_B', 'y'} with tokenized sentences and float scores."""
        data = {'X_A': [], 'X_B': [], 'y': []}
        with io.open(fpath, 'r', encoding='utf-8') as f:
            for line in f:
                fields = line.strip().split('\t')
                data['X_A'].append(fields[5].split())   # sentence 1
                data['X_B'].append(fields[6].split())   # sentence 2
                data['y'].append(fields[4])             # gold score (string)
        data['y'] = [float(s) for s in data['y']]
        return data
class SICKRelatednessEval(STSEval):
    """SICK relatedness evaluated with the unsupervised STS protocol;
    scoring is inherited from STSEval."""

    def __init__(self, task_path, seed=1111):
        logging.debug('\n\n***** Transfer task : SICKRelatedness*****\n\n')
        self.seed = seed
        self.samples = []
        self.datasets = ['train', 'dev', 'test']
        self.data = {}
        for split, fname in [('train', 'SICK_train.txt'),
                             ('dev', 'SICK_trial.txt'),
                             ('test', 'SICK_test_annotated.txt')]:
            self.data[split] = self.loadFile(os.path.join(task_path, fname))

    def loadFile(self, fpath):
        """Parse one SICK split (tab-separated, one header row) into
        (sentences1, sentences2, relatedness scores)."""
        sents_a, sents_b, scores = [], [], []
        with io.open(fpath, 'r', encoding='utf-8') as f:
            for lineno, line in enumerate(f):
                if lineno == 0:
                    continue  # skip the column-header row
                fields = line.strip().split('\t')
                sents_a.append(fields[1].split())   # sentence 1
                sents_b.append(fields[2].split())   # sentence 2
                scores.append(fields[3])            # relatedness score
        scores = [float(s) for s in scores]
        # Collect every sentence so do_prepare can build a vocabulary.
        self.samples += sents_a + sents_b
        return (sents_a, sents_b, scores)
| 12,674 | 42.407534 | 129 | py |
MCSE | MCSE-master/SentEval/senteval/probing.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
'''
probing tasks
'''
from __future__ import absolute_import, division, unicode_literals
import os
import io
import copy
import logging
import numpy as np
from senteval.tools.validation import SplitClassifier
class PROBINGEval(object):
    """Generic probing-task evaluator.

    Reads a single tab-separated task file whose rows look like
    ``<split>\t<label>\t...\t<sentence>`` (split token in {'tr','va','te'}),
    embeds the sentences with a caller-supplied ``batcher`` and trains a
    classifier on top of the frozen embeddings.

    Args:
        task: short task name, used for logging and special-casing.
        task_path: path to the task's data file.
        seed: RNG seed forwarded to the classifier.
    """

    def __init__(self, task, task_path, seed=1111):
        self.seed = seed
        self.task = task
        logging.debug('***** (Probing) Transfer task : %s classification *****', self.task.upper())
        # Per-split tokenized sentences (X) and labels (y); filled by loadFile.
        self.task_data = {'train': {'X': [], 'y': []},
                          'dev': {'X': [], 'y': []},
                          'test': {'X': [], 'y': []}}
        self.loadFile(task_path)
        logging.info('Loaded %s train - %s dev - %s test for %s' %
                     (len(self.task_data['train']['y']), len(self.task_data['dev']['y']),
                      len(self.task_data['test']['y']), self.task))

    def do_prepare(self, params, prepare):
        """Run the caller's `prepare` hook on all sentences (e.g. to build a vocab)."""
        samples = self.task_data['train']['X'] + self.task_data['dev']['X'] + \
                  self.task_data['test']['X']
        return prepare(params, samples)

    def loadFile(self, fpath):
        """Read the task file, route rows into splits and map labels to int ids."""
        # The first column encodes the split.
        self.tok2split = {'tr': 'train', 'va': 'dev', 'te': 'test'}
        with io.open(fpath, 'r', encoding='utf-8') as f:
            for line in f:
                line = line.rstrip().split('\t')
                # Last field is the sentence (whitespace-tokenized); second is the label.
                self.task_data[self.tok2split[line[0]]]['X'].append(line[-1].split())
                self.task_data[self.tok2split[line[0]]]['y'].append(line[1])

        # Label ids follow the sorted order of labels seen in the train split.
        labels = sorted(np.unique(self.task_data['train']['y']))
        self.tok2label = dict(zip(labels, range(len(labels))))
        self.nclasses = len(self.tok2label)

        # Replace string labels with their integer ids in every split.
        for split in self.task_data:
            for i, y in enumerate(self.task_data[split]['y']):
                self.task_data[split]['y'][i] = self.tok2label[y]

    def run(self, params, batcher):
        """Embed all splits with `batcher`, then train/evaluate a SplitClassifier.

        Returns:
            dict with dev/test accuracy and the dev/test split sizes.
        """
        task_embed = {'train': {}, 'dev': {}, 'test': {}}
        bsize = params.batch_size
        logging.info('Computing embeddings for train/dev/test')
        for key in self.task_data:
            # Sort to reduce padding
            sorted_data = sorted(zip(self.task_data[key]['X'],
                                     self.task_data[key]['y']),
                                 key=lambda z: (len(z[0]), z[1]))
            self.task_data[key]['X'], self.task_data[key]['y'] = map(list, zip(*sorted_data))

            # Embed the split in mini-batches, then stack into one matrix.
            task_embed[key]['X'] = []
            for ii in range(0, len(self.task_data[key]['y']), bsize):
                batch = self.task_data[key]['X'][ii:ii + bsize]
                embeddings = batcher(params, batch)
                task_embed[key]['X'].append(embeddings)
            task_embed[key]['X'] = np.vstack(task_embed[key]['X'])
            task_embed[key]['y'] = np.array(self.task_data[key]['y'])
        logging.info('Computed embeddings')

        config_classifier = {'nclasses': self.nclasses, 'seed': self.seed,
                             'usepytorch': params.usepytorch,
                             'classifier': params.classifier}

        # WordContent is always evaluated with a linear classifier (nhid=0);
        # deepcopy so the caller's classifier config is not mutated.
        if self.task == "WordContent" and params.classifier['nhid'] > 0:
            config_classifier = copy.deepcopy(config_classifier)
            config_classifier['classifier']['nhid'] = 0
            print(params.classifier['nhid'])

        clf = SplitClassifier(X={'train': task_embed['train']['X'],
                                 'valid': task_embed['dev']['X'],
                                 'test': task_embed['test']['X']},
                              y={'train': task_embed['train']['y'],
                                 'valid': task_embed['dev']['y'],
                                 'test': task_embed['test']['y']},
                              config=config_classifier)

        devacc, testacc = clf.run()
        logging.debug('\nDev acc : %.1f Test acc : %.1f for %s classification\n' % (devacc, testacc, self.task.upper()))

        return {'devacc': devacc, 'acc': testacc,
                'ndev': len(task_embed['dev']['X']),
                'ntest': len(task_embed['test']['X'])}
"""
Surface Information
"""
class LengthEval(PROBINGEval):
    """Surface probing task: predict the (binned) length of a sentence."""

    def __init__(self, task_path, seed=1111):
        # labels: bins
        data_file = os.path.join(task_path, 'sentence_length.txt')
        PROBINGEval.__init__(self, 'Length', data_file, seed)
class WordContentEval(PROBINGEval):
    """Surface probing task: recover which target word a sentence contains."""

    def __init__(self, task_path, seed=1111):
        # labels: 200 target words
        data_file = os.path.join(task_path, 'word_content.txt')
        PROBINGEval.__init__(self, 'WordContent', data_file, seed)
"""
Latent Structural Information
"""
class DepthEval(PROBINGEval):
    """Syntactic probing task: predict the (binned) parse-tree depth."""

    def __init__(self, task_path, seed=1111):
        # labels: bins
        data_file = os.path.join(task_path, 'tree_depth.txt')
        PROBINGEval.__init__(self, 'Depth', data_file, seed)
class TopConstituentsEval(PROBINGEval):
    """Syntactic probing task: predict the sequence of top constituents."""

    def __init__(self, task_path, seed=1111):
        # labels: 'PP_NP_VP_.' .. (20 classes)
        data_file = os.path.join(task_path, 'top_constituents.txt')
        PROBINGEval.__init__(self, 'TopConstituents', data_file, seed)
class BigramShiftEval(PROBINGEval):
    """Syntactic probing task: detect whether two adjacent words were swapped."""

    def __init__(self, task_path, seed=1111):
        # labels: 0 or 1
        data_file = os.path.join(task_path, 'bigram_shift.txt')
        PROBINGEval.__init__(self, 'BigramShift', data_file, seed)
# TODO: Voice?
"""
Latent Semantic Information
"""
class TenseEval(PROBINGEval):
    """Semantic probing task: predict the main verb's tense."""

    def __init__(self, task_path, seed=1111):
        # labels: 'PRES', 'PAST'
        data_file = os.path.join(task_path, 'past_present.txt')
        PROBINGEval.__init__(self, 'Tense', data_file, seed)
class SubjNumberEval(PROBINGEval):
    """Semantic probing task: predict the grammatical number of the subject."""

    def __init__(self, task_path, seed=1111):
        # labels: 'NN', 'NNS'
        data_file = os.path.join(task_path, 'subj_number.txt')
        PROBINGEval.__init__(self, 'SubjNumber', data_file, seed)
class ObjNumberEval(PROBINGEval):
    """Semantic probing task: predict the grammatical number of the object."""

    def __init__(self, task_path, seed=1111):
        # labels: 'NN', 'NNS'
        data_file = os.path.join(task_path, 'obj_number.txt')
        PROBINGEval.__init__(self, 'ObjNumber', data_file, seed)
class OddManOutEval(PROBINGEval):
    """Semantic probing task: detect whether a word was replaced (odd man out)."""

    def __init__(self, task_path, seed=1111):
        # labels: 'O', 'C'
        data_file = os.path.join(task_path, 'odd_man_out.txt')
        PROBINGEval.__init__(self, 'OddManOut', data_file, seed)
class CoordinationInversionEval(PROBINGEval):
    """Semantic probing task: detect inverted coordinated clauses."""

    def __init__(self, task_path, seed=1111):
        # labels: 'O', 'I'
        data_file = os.path.join(task_path, 'coordination_inversion.txt')
        PROBINGEval.__init__(self, 'CoordinationInversion', data_file, seed)
| 6,786 | 38.459302 | 120 | py |
MCSE | MCSE-master/SentEval/senteval/sick.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
'''
SICK Relatedness and Entailment
'''
from __future__ import absolute_import, division, unicode_literals
import os
import io
import logging
import numpy as np
from sklearn.metrics import mean_squared_error
from scipy.stats import pearsonr, spearmanr
from senteval.tools.relatedness import RelatednessPytorch
from senteval.tools.validation import SplitClassifier
class SICKEval(object):
    """SICK semantic relatedness task (supervised).

    Sentence pairs are embedded with a caller-supplied ``batcher``; the pair
    features ``|u - v|`` and ``u * v`` feed a small Pytorch regressor trained
    against the gold relatedness scores (range 1-5).
    """

    def __init__(self, task_path, seed=1111):
        logging.debug('***** Transfer task : SICK-Relatedness*****\n\n')
        self.seed = seed
        train = self.loadFile(os.path.join(task_path, 'SICK_train.txt'))
        dev = self.loadFile(os.path.join(task_path, 'SICK_trial.txt'))
        test = self.loadFile(os.path.join(task_path, 'SICK_test_annotated.txt'))
        self.sick_data = {'train': train, 'dev': dev, 'test': test}

    def do_prepare(self, params, prepare):
        """Run the caller's `prepare` hook on every sentence of every split."""
        samples = self.sick_data['train']['X_A'] + \
                  self.sick_data['train']['X_B'] + \
                  self.sick_data['dev']['X_A'] + \
                  self.sick_data['dev']['X_B'] + \
                  self.sick_data['test']['X_A'] + self.sick_data['test']['X_B']
        return prepare(params, samples)

    def loadFile(self, fpath):
        """Parse one SICK split (tab-separated, one header row) into tokenized
        sentence pairs and float relatedness scores."""
        skipFirstLine = True
        sick_data = {'X_A': [], 'X_B': [], 'y': []}
        with io.open(fpath, 'r', encoding='utf-8') as f:
            for line in f:
                if skipFirstLine:
                    skipFirstLine = False
                else:
                    text = line.strip().split('\t')
                    sick_data['X_A'].append(text[1].split())
                    sick_data['X_B'].append(text[2].split())
                    sick_data['y'].append(text[3])
        sick_data['y'] = [float(s) for s in sick_data['y']]
        return sick_data

    def run(self, params, batcher):
        """Embed all pairs, train the relatedness regressor and score on test.

        Returns:
            dict with dev Spearman, test Pearson/Spearman/MSE, the test
            predictions (`yhat`) and the dev/test split sizes.
        """
        sick_embed = {'train': {}, 'dev': {}, 'test': {}}
        bsize = params.batch_size

        for key in self.sick_data:
            logging.info('Computing embedding for {0}'.format(key))
            # Sort to reduce padding
            sorted_corpus = sorted(zip(self.sick_data[key]['X_A'],
                                       self.sick_data[key]['X_B'],
                                       self.sick_data[key]['y']),
                                   key=lambda z: (len(z[0]), len(z[1]), z[2]))

            self.sick_data[key]['X_A'] = [x for (x, y, z) in sorted_corpus]
            self.sick_data[key]['X_B'] = [y for (x, y, z) in sorted_corpus]
            self.sick_data[key]['y'] = [z for (x, y, z) in sorted_corpus]

            # Embed each side of the pair in mini-batches.
            for txt_type in ['X_A', 'X_B']:
                sick_embed[key][txt_type] = []
                for ii in range(0, len(self.sick_data[key]['y']), bsize):
                    batch = self.sick_data[key][txt_type][ii:ii + bsize]
                    embeddings = batcher(params, batch)
                    sick_embed[key][txt_type].append(embeddings)
                sick_embed[key][txt_type] = np.vstack(sick_embed[key][txt_type])
            sick_embed[key]['y'] = np.array(self.sick_data[key]['y'])
            logging.info('Computed {0} embeddings'.format(key))

        # Pair features: elementwise |u - v| concatenated with u * v.
        # Train
        trainA = sick_embed['train']['X_A']
        trainB = sick_embed['train']['X_B']
        trainF = np.c_[np.abs(trainA - trainB), trainA * trainB]
        trainY = self.encode_labels(self.sick_data['train']['y'])
        # Dev
        devA = sick_embed['dev']['X_A']
        devB = sick_embed['dev']['X_B']
        devF = np.c_[np.abs(devA - devB), devA * devB]
        devY = self.encode_labels(self.sick_data['dev']['y'])
        # Test
        testA = sick_embed['test']['X_A']
        testB = sick_embed['test']['X_B']
        testF = np.c_[np.abs(testA - testB), testA * testB]
        testY = self.encode_labels(self.sick_data['test']['y'])

        config = {'seed': self.seed, 'nclasses': 5}
        clf = RelatednessPytorch(train={'X': trainF, 'y': trainY},
                                 valid={'X': devF, 'y': devY},
                                 test={'X': testF, 'y': testY},
                                 devscores=self.sick_data['dev']['y'],
                                 config=config)

        devspr, yhat = clf.run()

        pr = pearsonr(yhat, self.sick_data['test']['y'])[0]
        sr = spearmanr(yhat, self.sick_data['test']['y'])[0]
        # NaN guard (x != x is True only for NaN, e.g. zero-variance output).
        pr = 0 if pr != pr else pr
        sr = 0 if sr != sr else sr
        se = mean_squared_error(yhat, self.sick_data['test']['y'])
        logging.debug('Dev : Spearman {0}'.format(devspr))
        logging.debug('Test : Pearson {0} Spearman {1} MSE {2} \
for SICK Relatedness\n'.format(pr, sr, se))

        return {'devspearman': devspr, 'pearson': pr, 'spearman': sr, 'mse': se,
                'yhat': yhat, 'ndev': len(devA), 'ntest': len(testA)}

    def encode_labels(self, labels, nclass=5):
        """
        Label encoding from Tree LSTM paper (Tai, Socher, Manning)

        A score y in [1, nclass] becomes a sparse distribution over the
        integer bins: mass (y - floor(y)) on bin floor(y)+1 and the remainder
        on bin floor(y), so the expectation over bin indices equals y.
        """
        Y = np.zeros((len(labels), nclass)).astype('float32')
        for j, y in enumerate(labels):
            for i in range(nclass):
                if i+1 == np.floor(y) + 1:
                    Y[j, i] = y - np.floor(y)
                if i+1 == np.floor(y):
                    Y[j, i] = np.floor(y) - y + 1
        return Y
class SICKEntailmentEval(SICKEval):
    """SICK entailment task: 3-way classification
    (CONTRADICTION / NEUTRAL / ENTAILMENT) over the same pair features
    |u - v| and u * v as the relatedness task."""

    def __init__(self, task_path, seed=1111):
        logging.debug('***** Transfer task : SICK-Entailment*****\n\n')
        self.seed = seed
        train = self.loadFile(os.path.join(task_path, 'SICK_train.txt'))
        dev = self.loadFile(os.path.join(task_path, 'SICK_trial.txt'))
        test = self.loadFile(os.path.join(task_path, 'SICK_test_annotated.txt'))
        self.sick_data = {'train': train, 'dev': dev, 'test': test}

    def loadFile(self, fpath):
        """Parse one SICK split; unlike the relatedness task, `y` is the
        entailment label (column 4) mapped to an integer id."""
        label2id = {'CONTRADICTION': 0, 'NEUTRAL': 1, 'ENTAILMENT': 2}
        skipFirstLine = True
        sick_data = {'X_A': [], 'X_B': [], 'y': []}
        with io.open(fpath, 'r', encoding='utf-8') as f:
            for line in f:
                if skipFirstLine:
                    skipFirstLine = False
                else:
                    text = line.strip().split('\t')
                    sick_data['X_A'].append(text[1].split())
                    sick_data['X_B'].append(text[2].split())
                    sick_data['y'].append(text[4])
        sick_data['y'] = [label2id[s] for s in sick_data['y']]
        return sick_data

    def run(self, params, batcher):
        """Embed all pairs and train/evaluate a 3-class SplitClassifier.

        Returns:
            dict with dev/test accuracy and the dev/test split sizes.
        """
        sick_embed = {'train': {}, 'dev': {}, 'test': {}}
        bsize = params.batch_size

        for key in self.sick_data:
            logging.info('Computing embedding for {0}'.format(key))
            # Sort to reduce padding
            sorted_corpus = sorted(zip(self.sick_data[key]['X_A'],
                                       self.sick_data[key]['X_B'],
                                       self.sick_data[key]['y']),
                                   key=lambda z: (len(z[0]), len(z[1]), z[2]))

            self.sick_data[key]['X_A'] = [x for (x, y, z) in sorted_corpus]
            self.sick_data[key]['X_B'] = [y for (x, y, z) in sorted_corpus]
            self.sick_data[key]['y'] = [z for (x, y, z) in sorted_corpus]

            # Embed each side of the pair in mini-batches.
            for txt_type in ['X_A', 'X_B']:
                sick_embed[key][txt_type] = []
                for ii in range(0, len(self.sick_data[key]['y']), bsize):
                    batch = self.sick_data[key][txt_type][ii:ii + bsize]
                    embeddings = batcher(params, batch)
                    sick_embed[key][txt_type].append(embeddings)
                sick_embed[key][txt_type] = np.vstack(sick_embed[key][txt_type])
            logging.info('Computed {0} embeddings'.format(key))

        # Pair features: elementwise |u - v| concatenated with u * v.
        # Train
        trainA = sick_embed['train']['X_A']
        trainB = sick_embed['train']['X_B']
        trainF = np.c_[np.abs(trainA - trainB), trainA * trainB]
        trainY = np.array(self.sick_data['train']['y'])
        # Dev
        devA = sick_embed['dev']['X_A']
        devB = sick_embed['dev']['X_B']
        devF = np.c_[np.abs(devA - devB), devA * devB]
        devY = np.array(self.sick_data['dev']['y'])
        # Test
        testA = sick_embed['test']['X_A']
        testB = sick_embed['test']['X_B']
        testF = np.c_[np.abs(testA - testB), testA * testB]
        testY = np.array(self.sick_data['test']['y'])

        config = {'nclasses': 3, 'seed': self.seed,
                  'usepytorch': params.usepytorch,
                  'classifier': params.classifier,
                  'nhid': params.nhid}
        clf = SplitClassifier(X={'train': trainF, 'valid': devF, 'test': testF},
                              y={'train': trainY, 'valid': devY, 'test': testY},
                              config=config)

        devacc, testacc = clf.run()
        logging.debug('\nDev acc : {0} Test acc : {1} for \
SICK entailment\n'.format(devacc, testacc))
        return {'devacc': devacc, 'acc': testacc,
                'ndev': len(devA), 'ntest': len(testA)}
| 9,243 | 41.599078 | 80 | py |
MCSE | MCSE-master/SentEval/senteval/__init__.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from __future__ import absolute_import
from senteval.engine import SE
| 264 | 23.090909 | 61 | py |
MCSE | MCSE-master/SentEval/senteval/trec.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
'''
TREC question-type classification
'''
from __future__ import absolute_import, division, unicode_literals
import os
import io
import logging
import numpy as np
from senteval.tools.validation import KFoldClassifier
class TRECEval(object):
    """TREC question-type classification (6 classes).

    Train/test files hold one question per line as ``LABEL:finetype text``;
    sentences are embedded with a caller-supplied ``batcher`` and a
    cross-validated classifier is trained on the frozen embeddings.
    """

    def __init__(self, task_path, seed=1111):
        logging.info('***** Transfer task : TREC *****\n\n')
        self.seed = seed
        self.train = self.loadFile(os.path.join(task_path, 'train_5500.label'))
        self.test = self.loadFile(os.path.join(task_path, 'TREC_10.label'))

    def do_prepare(self, params, prepare):
        """Run the caller's `prepare` hook on all train+test questions."""
        samples = self.train['X'] + self.test['X']
        return prepare(params, samples)

    def loadFile(self, fpath):
        """Parse one TREC file into tokenized questions and coarse label ids."""
        trec_data = {'X': [], 'y': []}
        tgt2idx = {'ABBR': 0, 'DESC': 1, 'ENTY': 2,
                   'HUM': 3, 'LOC': 4, 'NUM': 5}
        # TREC files are latin-1 encoded.
        with io.open(fpath, 'r', encoding='latin-1') as f:
            for line in f:
                # "LOC:other What town ..." -> target='LOC', rest after ':'.
                target, sample = line.strip().split(':', 1)
                # Drop the fine-grained subtype token, keep the question words.
                sample = sample.split(' ', 1)[1].split()
                assert target in tgt2idx, target
                trec_data['X'].append(sample)
                trec_data['y'].append(tgt2idx[target])
        return trec_data

    def run(self, params, batcher):
        """Embed train/test and run a KFoldClassifier.

        Returns:
            dict with dev/test accuracy and the train/test split sizes.
        """
        train_embeddings, test_embeddings = [], []

        # Sort to reduce padding
        sorted_corpus_train = sorted(zip(self.train['X'], self.train['y']),
                                     key=lambda z: (len(z[0]), z[1]))
        train_samples = [x for (x, y) in sorted_corpus_train]
        train_labels = [y for (x, y) in sorted_corpus_train]

        sorted_corpus_test = sorted(zip(self.test['X'], self.test['y']),
                                    key=lambda z: (len(z[0]), z[1]))
        test_samples = [x for (x, y) in sorted_corpus_test]
        test_labels = [y for (x, y) in sorted_corpus_test]

        # Get train embeddings
        for ii in range(0, len(train_labels), params.batch_size):
            batch = train_samples[ii:ii + params.batch_size]
            embeddings = batcher(params, batch)
            train_embeddings.append(embeddings)
        train_embeddings = np.vstack(train_embeddings)
        logging.info('Computed train embeddings')

        # Get test embeddings
        for ii in range(0, len(test_labels), params.batch_size):
            batch = test_samples[ii:ii + params.batch_size]
            embeddings = batcher(params, batch)
            test_embeddings.append(embeddings)
        test_embeddings = np.vstack(test_embeddings)
        logging.info('Computed test embeddings')

        config_classifier = {'nclasses': 6, 'seed': self.seed,
                             'usepytorch': params.usepytorch,
                             'classifier': params.classifier,
                             'kfold': params.kfold}
        clf = KFoldClassifier({'X': train_embeddings,
                               'y': np.array(train_labels)},
                              {'X': test_embeddings,
                               'y': np.array(test_labels)},
                              config_classifier)
        devacc, testacc, _ = clf.run()
        logging.debug('\nDev acc : {0} Test acc : {1} \
for TREC\n'.format(devacc, testacc))
        return {'devacc': devacc, 'acc': testacc,
                'ndev': len(self.train['X']), 'ntest': len(self.test['X'])}
| 3,565 | 38.622222 | 79 | py |
MCSE | MCSE-master/SentEval/senteval/sst.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
'''
SST - binary classification
'''
from __future__ import absolute_import, division, unicode_literals
import os
import io
import logging
import numpy as np
from senteval.tools.validation import SplitClassifier
class SSTEval(object):
    """Stanford Sentiment Treebank classification (binary or 5-class).

    Sentences are embedded with a caller-supplied ``batcher``; a
    SplitClassifier is trained on the frozen embeddings.
    """

    def __init__(self, task_path, nclasses=2, seed=1111):
        self.seed = seed

        # binary or fine-grained
        assert nclasses in [2, 5]
        self.nclasses = nclasses
        self.task_name = 'Binary' if self.nclasses == 2 else 'Fine-Grained'
        logging.debug('***** Transfer task : SST %s classification *****\n\n', self.task_name)

        train = self.loadFile(os.path.join(task_path, 'sentiment-train'))
        dev = self.loadFile(os.path.join(task_path, 'sentiment-dev'))
        test = self.loadFile(os.path.join(task_path, 'sentiment-test'))
        self.sst_data = {'train': train, 'dev': dev, 'test': test}

    def do_prepare(self, params, prepare):
        """Run the caller's `prepare` hook on all sentences of all splits."""
        samples = self.sst_data['train']['X'] + self.sst_data['dev']['X'] + \
                  self.sst_data['test']['X']
        return prepare(params, samples)

    def loadFile(self, fpath):
        """Parse one SST split.

        Binary files are "sentence<TAB>label"; fine-grained files are
        "label<SPACE>sentence".
        """
        sst_data = {'X': [], 'y': []}
        with io.open(fpath, 'r', encoding='utf-8') as f:
            for line in f:
                if self.nclasses == 2:
                    sample = line.strip().split('\t')
                    sst_data['y'].append(int(sample[1]))
                    sst_data['X'].append(sample[0].split())
                elif self.nclasses == 5:
                    sample = line.strip().split(' ', 1)
                    sst_data['y'].append(int(sample[0]))
                    sst_data['X'].append(sample[1].split())
        assert max(sst_data['y']) == self.nclasses - 1
        return sst_data

    def run(self, params, batcher):
        """Embed all splits and train/evaluate a SplitClassifier.

        Returns:
            dict with dev/test accuracy and the dev/test split sizes.
        """
        sst_embed = {'train': {}, 'dev': {}, 'test': {}}
        bsize = params.batch_size

        for key in self.sst_data:
            logging.info('Computing embedding for {0}'.format(key))
            # Sort to reduce padding
            sorted_data = sorted(zip(self.sst_data[key]['X'],
                                     self.sst_data[key]['y']),
                                 key=lambda z: (len(z[0]), z[1]))
            self.sst_data[key]['X'], self.sst_data[key]['y'] = map(list, zip(*sorted_data))

            # Embed the split in mini-batches, then stack into one matrix.
            sst_embed[key]['X'] = []
            for ii in range(0, len(self.sst_data[key]['y']), bsize):
                batch = self.sst_data[key]['X'][ii:ii + bsize]
                embeddings = batcher(params, batch)
                sst_embed[key]['X'].append(embeddings)
            sst_embed[key]['X'] = np.vstack(sst_embed[key]['X'])
            sst_embed[key]['y'] = np.array(self.sst_data[key]['y'])
            logging.info('Computed {0} embeddings'.format(key))

        config_classifier = {'nclasses': self.nclasses, 'seed': self.seed,
                             'usepytorch': params.usepytorch,
                             'classifier': params.classifier}

        clf = SplitClassifier(X={'train': sst_embed['train']['X'],
                                 'valid': sst_embed['dev']['X'],
                                 'test': sst_embed['test']['X']},
                              y={'train': sst_embed['train']['y'],
                                 'valid': sst_embed['dev']['y'],
                                 'test': sst_embed['test']['y']},
                              config=config_classifier)

        devacc, testacc = clf.run()
        logging.debug('\nDev acc : {0} Test acc : {1} for \
SST {2} classification\n'.format(devacc, testacc, self.task_name))

        return {'devacc': devacc, 'acc': testacc,
                'ndev': len(sst_embed['dev']['X']),
                'ntest': len(sst_embed['test']['X'])}
| 3,946 | 39.690722 | 94 | py |
MCSE | MCSE-master/SentEval/senteval/tools/relatedness.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
"""
Semantic Relatedness (supervised) with Pytorch
"""
from __future__ import absolute_import, division, unicode_literals
import copy
import numpy as np
import torch
from torch import nn
import torch.optim as optim
from scipy.stats import pearsonr, spearmanr
class RelatednessPytorch(object):
    # Can be used for SICK-Relatedness, and STS14
    """Small Pytorch regressor for relatedness scoring.

    A single linear layer + softmax predicts a distribution over the score
    bins 1..nclasses; the scalar prediction is the expectation over bins.
    Requires CUDA.
    """

    def __init__(self, train, valid, test, devscores, config):
        # fix seed
        np.random.seed(config['seed'])
        torch.manual_seed(config['seed'])
        assert torch.cuda.is_available(), 'torch.cuda required for Relatedness'
        torch.cuda.manual_seed(config['seed'])

        self.train = train
        self.valid = valid
        self.test = test
        self.devscores = devscores

        self.inputdim = train['X'].shape[1]
        self.nclasses = config['nclasses']
        self.seed = config['seed']
        self.l2reg = 0.
        self.batch_size = 64
        self.maxepoch = 1000
        self.early_stop = True

        self.model = nn.Sequential(
            nn.Linear(self.inputdim, self.nclasses),
            nn.Softmax(dim=-1),
        )
        self.loss_fn = nn.MSELoss()

        if torch.cuda.is_available():
            self.model = self.model.cuda()
            self.loss_fn = self.loss_fn.cuda()

        self.loss_fn.size_average = False
        self.optimizer = optim.Adam(self.model.parameters(),
                                    weight_decay=self.l2reg)

    def prepare_data(self, trainX, trainy, devX, devy, testX, testy):
        """Move the numpy arrays onto the GPU as float tensors."""
        # Transform probs to log-probs for KL-divergence
        trainX = torch.from_numpy(trainX).float().cuda()
        trainy = torch.from_numpy(trainy).float().cuda()
        devX = torch.from_numpy(devX).float().cuda()
        devy = torch.from_numpy(devy).float().cuda()
        testX = torch.from_numpy(testX).float().cuda()
        # NOTE(review): testY is assigned but never used -- the function
        # returns the original numpy `testy` below. `testy` is unused by the
        # only caller (run), so this is harmless, but worth cleaning up.
        testY = torch.from_numpy(testy).float().cuda()

        return trainX, trainy, devX, devy, testX, testy

    def run(self):
        """Train with early stopping on the dev correlation; return the best
        dev score and the test-set predictions of the best model."""
        self.nepoch = 0
        bestpr = -1
        early_stop_count = 0
        # Score bins 1..5; predictions are the expectation over bins.
        r = np.arange(1, 6)
        stop_train = False

        # Preparing data
        trainX, trainy, devX, devy, testX, testy = self.prepare_data(
            self.train['X'], self.train['y'],
            self.valid['X'], self.valid['y'],
            self.test['X'], self.test['y'])

        # Training
        while not stop_train and self.nepoch <= self.maxepoch:
            self.trainepoch(trainX, trainy, nepoches=50)
            yhat = np.dot(self.predict_proba(devX), r)
            # NOTE(review): despite the 'pr' naming, early stopping is on the
            # dev *Spearman* correlation here.
            pr = spearmanr(yhat, self.devscores)[0]
            pr = 0 if pr != pr else pr  # if NaN bc std=0
            # early stop on dev correlation
            if pr > bestpr:
                bestpr = pr
                bestmodel = copy.deepcopy(self.model)
            elif self.early_stop:
                if early_stop_count >= 3:
                    stop_train = True
                early_stop_count += 1
        self.model = bestmodel

        yhat = np.dot(self.predict_proba(testX), r)

        return bestpr, yhat

    def trainepoch(self, X, y, nepoches=1):
        """Run `nepoches` passes of minibatch SGD (Adam) over (X, y)."""
        self.model.train()
        for _ in range(self.nepoch, self.nepoch + nepoches):
            permutation = np.random.permutation(len(X))
            all_costs = []
            for i in range(0, len(X), self.batch_size):
                # forward
                idx = torch.from_numpy(permutation[i:i + self.batch_size]).long().cuda()

                Xbatch = X[idx]
                ybatch = y[idx]

                output = self.model(Xbatch)
                # loss
                loss = self.loss_fn(output, ybatch)
                all_costs.append(loss.item())
                # backward
                self.optimizer.zero_grad()
                loss.backward()
                # Update parameters
                self.optimizer.step()
        self.nepoch += nepoches

    def predict_proba(self, devX):
        """Return the model's class probabilities for devX as a numpy array."""
        self.model.eval()
        probas = []
        with torch.no_grad():
            for i in range(0, len(devX), self.batch_size):
                Xbatch = devX[i:i + self.batch_size]
                if len(probas) == 0:
                    probas = self.model(Xbatch).data.cpu().numpy()
                else:
                    probas = np.concatenate((probas, self.model(Xbatch).data.cpu().numpy()), axis=0)
        return probas
| 4,552 | 32.725926 | 100 | py |
MCSE | MCSE-master/SentEval/senteval/tools/validation.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
"""
Validation and classification
(train) : inner-kfold classifier
(train, test) : kfold classifier
(train, dev, test) : split classifier
"""
from __future__ import absolute_import, division, unicode_literals
import logging
import numpy as np
from senteval.tools.classifier import MLP
import sklearn
assert(sklearn.__version__ >= "0.18.0"), \
"need to update sklearn to version >= 0.18.0"
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import StratifiedKFold
def get_classif_name(classifier_config, usepytorch):
    """Build a short human-readable name describing the classifier setup.

    Args:
        classifier_config: dict with at least 'nhid'; 'optim' and
            'batch_size' are optional (defaults 'adam' and 64).
        usepytorch: if False, the sklearn logistic-regression name is used.

    Returns:
        str identifier, e.g. 'pytorch-MLP-nhid0-adam-bs64'.
    """
    if not usepytorch:
        return 'sklearn-LogReg'
    optim = classifier_config.get('optim', 'adam')
    bs = classifier_config.get('batch_size', 64)
    return 'pytorch-MLP-nhid%s-%s-bs%s' % (classifier_config['nhid'], optim, bs)
# Pytorch version
class InnerKFoldClassifier(object):
    """
    (train) split classifier : InnerKfold.

    Outer k-fold cross-validation for test accuracy; within each outer
    training fold, an inner k-fold selects the L2-regularization strength.
    """
    def __init__(self, X, y, config):
        self.X = X
        self.y = y
        self.featdim = X.shape[1]
        self.nclasses = config['nclasses']
        self.seed = config['seed']
        self.devresults = []   # best inner-CV score per outer fold
        self.testresults = []  # outer test accuracy per fold
        self.usepytorch = config['usepytorch']
        self.classifier_config = config['classifier']
        self.modelname = get_classif_name(self.classifier_config, self.usepytorch)

        self.k = 5 if 'kfold' not in config else config['kfold']

    def run(self):
        """Run the nested cross-validation.

        Returns:
            (dev accuracy, test accuracy), both averaged over outer folds
            and rounded to 2 decimals.
        """
        logging.info('Training {0} with (inner) {1}-fold cross-validation'
                     .format(self.modelname, self.k))

        # Regularization grid differs between the MLP (l2reg) and sklearn (C).
        regs = [10**t for t in range(-5, -1)] if self.usepytorch else \
               [2**t for t in range(-2, 4, 1)]
        skf = StratifiedKFold(n_splits=self.k, shuffle=True, random_state=1111)
        innerskf = StratifiedKFold(n_splits=self.k, shuffle=True,
                                   random_state=1111)
        count = 0
        for train_idx, test_idx in skf.split(self.X, self.y):
            count += 1
            X_train, X_test = self.X[train_idx], self.X[test_idx]
            y_train, y_test = self.y[train_idx], self.y[test_idx]
            scores = []
            for reg in regs:
                # Inner CV estimates this reg's accuracy on the outer train fold.
                regscores = []
                for inner_train_idx, inner_test_idx in innerskf.split(X_train, y_train):
                    X_in_train, X_in_test = X_train[inner_train_idx], X_train[inner_test_idx]
                    y_in_train, y_in_test = y_train[inner_train_idx], y_train[inner_test_idx]
                    if self.usepytorch:
                        clf = MLP(self.classifier_config, inputdim=self.featdim,
                                  nclasses=self.nclasses, l2reg=reg,
                                  seed=self.seed)
                        clf.fit(X_in_train, y_in_train,
                                validation_data=(X_in_test, y_in_test))
                    else:
                        clf = LogisticRegression(C=reg, random_state=self.seed)
                        clf.fit(X_in_train, y_in_train)
                    regscores.append(clf.score(X_in_test, y_in_test))
                scores.append(round(100*np.mean(regscores), 2))
            optreg = regs[np.argmax(scores)]
            logging.info('Best param found at split {0}: l2reg = {1} \
with score {2}'.format(count, optreg, np.max(scores)))
            self.devresults.append(np.max(scores))

            # Retrain on the full outer train fold with the selected reg.
            if self.usepytorch:
                clf = MLP(self.classifier_config, inputdim=self.featdim,
                          nclasses=self.nclasses, l2reg=optreg,
                          seed=self.seed)

                clf.fit(X_train, y_train, validation_split=0.05)
            else:
                clf = LogisticRegression(C=optreg, random_state=self.seed)
                clf.fit(X_train, y_train)

            self.testresults.append(round(100*clf.score(X_test, y_test), 2))

        devaccuracy = round(np.mean(self.devresults), 2)
        testaccuracy = round(np.mean(self.testresults), 2)
        return devaccuracy, testaccuracy
class KFoldClassifier(object):
    """
    (train, test) split classifier : cross-validation on train.

    The regularization strength is chosen by k-fold cross-validation on the
    train split; the final model is refit on all of train and evaluated once
    on the held-out test split.
    """
    def __init__(self, train, test, config):
        self.train = train
        self.test = test
        self.featdim = self.train['X'].shape[1]
        self.nclasses = config['nclasses']
        self.seed = config['seed']
        self.usepytorch = config['usepytorch']
        self.classifier_config = config['classifier']
        self.modelname = get_classif_name(self.classifier_config, self.usepytorch)

        self.k = 5 if 'kfold' not in config else config['kfold']

    def run(self):
        """Select reg by CV, refit, and score on test.

        Returns:
            (dev accuracy, test accuracy, test predictions).
        """
        # cross-validation
        logging.info('Training {0} with {1}-fold cross-validation'
                     .format(self.modelname, self.k))
        # Regularization grid differs between the MLP (l2reg) and sklearn (C).
        regs = [10**t for t in range(-5, -1)] if self.usepytorch else \
               [2**t for t in range(-1, 6, 1)]
        skf = StratifiedKFold(n_splits=self.k, shuffle=True,
                              random_state=self.seed)
        scores = []

        for reg in regs:
            scanscores = []
            for train_idx, test_idx in skf.split(self.train['X'],
                                                 self.train['y']):
                # Split data
                X_train, y_train = self.train['X'][train_idx], self.train['y'][train_idx]

                X_test, y_test = self.train['X'][test_idx], self.train['y'][test_idx]

                # Train classifier
                if self.usepytorch:
                    clf = MLP(self.classifier_config, inputdim=self.featdim,
                              nclasses=self.nclasses, l2reg=reg,
                              seed=self.seed)
                    clf.fit(X_train, y_train, validation_data=(X_test, y_test))
                else:
                    clf = LogisticRegression(C=reg, random_state=self.seed)
                    clf.fit(X_train, y_train)
                score = clf.score(X_test, y_test)
                scanscores.append(score)
            # Append mean score
            scores.append(round(100*np.mean(scanscores), 2))

        # evaluation
        logging.info([('reg:' + str(regs[idx]), scores[idx])
                      for idx in range(len(scores))])
        optreg = regs[np.argmax(scores)]
        devaccuracy = np.max(scores)
        logging.info('Cross-validation : best param found is reg = {0} \
with score {1}'.format(optreg, devaccuracy))

        logging.info('Evaluating...')
        # Refit on the full train split with the selected regularization.
        if self.usepytorch:
            clf = MLP(self.classifier_config, inputdim=self.featdim,
                      nclasses=self.nclasses, l2reg=optreg,
                      seed=self.seed)
            clf.fit(self.train['X'], self.train['y'], validation_split=0.05)
        else:
            clf = LogisticRegression(C=optreg, random_state=self.seed)
            clf.fit(self.train['X'], self.train['y'])
        yhat = clf.predict(self.test['X'])

        testaccuracy = clf.score(self.test['X'], self.test['y'])
        testaccuracy = round(100*testaccuracy, 2)

        return devaccuracy, testaccuracy, yhat
class SplitClassifier(object):
    """
    (train, valid, test) split classifier.

    Selects the regularization strength on the validation split, then
    retrains on the training split and reports test accuracy.

    Args:
        X: dict with 'train', 'valid' and 'test' feature matrices.
        y: dict with 'train', 'valid' and 'test' label vectors.
        config: dict with keys 'nclasses', 'seed', 'usepytorch' and
            'classifier', plus optional 'cudaEfficient' and 'noreg'.
    """
    def __init__(self, X, y, config):
        self.X = X
        self.y = y
        self.nclasses = config['nclasses']
        self.featdim = self.X['train'].shape[1]
        self.seed = config['seed']
        self.usepytorch = config['usepytorch']
        self.classifier_config = config['classifier']
        # cudaEfficient: keep data on CPU and move batches to GPU lazily.
        self.cudaEfficient = False if 'cudaEfficient' not in config else \
            config['cudaEfficient']
        self.modelname = get_classif_name(self.classifier_config, self.usepytorch)
        self.noreg = False if 'noreg' not in config else config['noreg']
        self.config = config

    def run(self):
        """Grid-search reg on the validation split; return (dev, test) accuracy."""
        logging.info('Training {0} with standard validation..'
                     .format(self.modelname))
        regs = [10**t for t in range(-5, -1)] if self.usepytorch else \
               [2**t for t in range(-2, 4, 1)]
        if self.noreg:
            # effectively disable regularization on either path
            regs = [1e-9 if self.usepytorch else 1e9]
        scores = []
        for reg in regs:
            if self.usepytorch:
                clf = MLP(self.classifier_config, inputdim=self.featdim,
                          nclasses=self.nclasses, l2reg=reg,
                          seed=self.seed, cudaEfficient=self.cudaEfficient)
                # TODO: Find a hack for reducing nb epoches in SNLI
                clf.fit(self.X['train'], self.y['train'],
                        validation_data=(self.X['valid'], self.y['valid']))
            else:
                clf = LogisticRegression(C=reg, random_state=self.seed)
                clf.fit(self.X['train'], self.y['train'])
            scores.append(round(100*clf.score(self.X['valid'],
                                self.y['valid']), 2))
        logging.info([('reg:'+str(regs[idx]), scores[idx])
                      for idx in range(len(scores))])
        optreg = regs[np.argmax(scores)]
        devaccuracy = np.max(scores)
        logging.info('Validation : best param found is reg = {0} with score \
            {1}'.format(optreg, devaccuracy))
        logging.info('Evaluating...')
        # FIX: the original unconditionally constructed a LogisticRegression
        # here even on the pytorch path, where it was immediately discarded.
        # Build the final classifier only in the branch that uses it.
        if self.usepytorch:
            clf = MLP(self.classifier_config, inputdim=self.featdim,
                      nclasses=self.nclasses, l2reg=optreg,
                      seed=self.seed, cudaEfficient=self.cudaEfficient)
            # TODO: Find a hack for reducing nb epoches in SNLI
            clf.fit(self.X['train'], self.y['train'],
                    validation_data=(self.X['valid'], self.y['valid']))
        else:
            clf = LogisticRegression(C=optreg, random_state=self.seed)
            clf.fit(self.X['train'], self.y['train'])
        testaccuracy = clf.score(self.X['test'], self.y['test'])
        testaccuracy = round(100*testaccuracy, 2)
        return devaccuracy, testaccuracy
| 10,358 | 40.939271 | 93 | py |
MCSE | MCSE-master/SentEval/senteval/tools/classifier.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
"""
Pytorch Classifier class in the style of scikit-learn
Classifiers include Logistic Regression and MLP
"""
from __future__ import absolute_import, division, unicode_literals
import numpy as np
import copy
from senteval import utils
import torch
from torch import nn
import torch.nn.functional as F
class PyTorchClassifier(object):
    """Scikit-learn-style base class for PyTorch classifiers.

    Subclasses (e.g. ``MLP`` below) must set ``self.model``,
    ``self.loss_fn``, ``self.optimizer``, ``self.max_epoch``,
    ``self.epoch_size`` and ``self.tenacity`` before calling ``fit``.
    """

    def __init__(self, inputdim, nclasses, l2reg=0., batch_size=64, seed=1111,
                 cudaEfficient=False):
        # fix seed (the torch.cuda call is deferred and harmless without a GPU)
        np.random.seed(seed)
        torch.manual_seed(seed)
        torch.cuda.manual_seed(seed)

        self.inputdim = inputdim
        self.nclasses = nclasses
        self.l2reg = l2reg
        self.batch_size = batch_size
        # cudaEfficient=True keeps the full dataset on CPU and moves one
        # batch at a time to the GPU (for datasets too big for GPU memory).
        self.cudaEfficient = cudaEfficient

    def prepare_split(self, X, y, validation_data=None, validation_split=None):
        """Split (X, y) numpy arrays into train/dev tensors.

        Either ``validation_data`` (an explicit (devX, devy) pair) or
        ``validation_split`` (fraction of X held out at random) is required.
        """
        assert validation_split or validation_data
        if validation_data is not None:
            trainX, trainy = X, y
            devX, devy = validation_data
        else:
            permutation = np.random.permutation(len(X))
            trainidx = permutation[int(validation_split * len(X)):]
            devidx = permutation[0:int(validation_split * len(X))]
            trainX, trainy = X[trainidx], y[trainidx]
            devX, devy = X[devidx], y[devidx]

        device = torch.device('cpu') if self.cudaEfficient else torch.device('cuda')
        trainX = torch.from_numpy(trainX).to(device, dtype=torch.float32)
        trainy = torch.from_numpy(trainy).to(device, dtype=torch.int64)
        devX = torch.from_numpy(devX).to(device, dtype=torch.float32)
        devy = torch.from_numpy(devy).to(device, dtype=torch.int64)
        return trainX, trainy, devX, devy

    def fit(self, X, y, validation_data=None, validation_split=None,
            early_stop=True):
        """Train until max_epoch or early stop; return the best dev accuracy.

        The model state is restored to the best-scoring snapshot on exit.
        """
        self.nepoch = 0
        bestaccuracy = -1
        stop_train = False
        early_stop_count = 0

        # Preparing validation data
        trainX, trainy, devX, devy = self.prepare_split(X, y, validation_data,
                                                        validation_split)

        # FIX: guard against max_epoch < 0, where the loop body never runs
        # and `bestmodel` would otherwise be unbound at the assignment below.
        bestmodel = copy.deepcopy(self.model)

        # Training
        while not stop_train and self.nepoch <= self.max_epoch:
            self.trainepoch(trainX, trainy, epoch_size=self.epoch_size)
            accuracy = self.score(devX, devy)
            if accuracy > bestaccuracy:
                bestaccuracy = accuracy
                bestmodel = copy.deepcopy(self.model)
            elif early_stop:
                # NOTE(review): the counter is never reset on improvement, so
                # stopping triggers after `tenacity` total (not consecutive)
                # non-improving epochs -- behavior preserved from original.
                if early_stop_count >= self.tenacity:
                    stop_train = True
                early_stop_count += 1
        self.model = bestmodel
        return bestaccuracy

    def trainepoch(self, X, y, epoch_size=1):
        """Run `epoch_size` passes of shuffled minibatch training over (X, y)."""
        self.model.train()
        for _ in range(self.nepoch, self.nepoch + epoch_size):
            permutation = np.random.permutation(len(X))
            all_costs = []
            for i in range(0, len(X), self.batch_size):
                # forward
                idx = torch.from_numpy(permutation[i:i + self.batch_size]).long().to(X.device)
                Xbatch = X[idx]
                ybatch = y[idx]
                if self.cudaEfficient:
                    Xbatch = Xbatch.cuda()
                    ybatch = ybatch.cuda()
                output = self.model(Xbatch)
                # loss
                loss = self.loss_fn(output, ybatch)
                all_costs.append(loss.data.item())
                # backward
                self.optimizer.zero_grad()
                loss.backward()
                # Update parameters
                self.optimizer.step()
        self.nepoch += epoch_size

    def score(self, devX, devy):
        """Return classification accuracy of the current model on (devX, devy).

        NOTE(review): the conversion branch calls .cuda() and therefore
        requires a CUDA device unless devX is already a cuda FloatTensor
        and cudaEfficient is False -- preserved from the original.
        """
        self.model.eval()
        correct = 0
        if not isinstance(devX, torch.cuda.FloatTensor) or self.cudaEfficient:
            devX = torch.FloatTensor(devX).cuda()
            devy = torch.LongTensor(devy).cuda()
        with torch.no_grad():
            for i in range(0, len(devX), self.batch_size):
                Xbatch = devX[i:i + self.batch_size]
                ybatch = devy[i:i + self.batch_size]
                if self.cudaEfficient:
                    Xbatch = Xbatch.cuda()
                    ybatch = ybatch.cuda()
                output = self.model(Xbatch)
                pred = output.data.max(1)[1]
                correct += pred.long().eq(ybatch.data.long()).sum().item()
            accuracy = 1.0 * correct / len(devX)
        return accuracy

    def predict(self, devX):
        """Return predicted class ids for devX as an (n, 1) column array."""
        self.model.eval()
        if not isinstance(devX, torch.cuda.FloatTensor):
            devX = torch.FloatTensor(devX).cuda()
        yhat = np.array([])
        with torch.no_grad():
            for i in range(0, len(devX), self.batch_size):
                Xbatch = devX[i:i + self.batch_size]
                output = self.model(Xbatch)
                yhat = np.append(yhat,
                                 output.data.max(1)[1].cpu().numpy())
        # np.vstack over a 1-D array yields an (n, 1) column; preserved from
        # the original since callers may rely on that shape.
        yhat = np.vstack(yhat)
        return yhat

    def predict_proba(self, devX):
        """Return class probabilities for devX as an (n, nclasses) array.

        BUGFIX: the original applied F.softmax to a NumPy array (TypeError,
        and without a `dim`), truth-tested a NumPy array after the first
        batch (ambiguous), and called np.concatenate with the wrong
        signature (arrays must be passed as one sequence). Softmax is now
        applied to the model output tensor over the class dimension and the
        per-batch results are concatenated once at the end.
        """
        self.model.eval()
        probas = []
        with torch.no_grad():
            for i in range(0, len(devX), self.batch_size):
                Xbatch = devX[i:i + self.batch_size]
                vals = F.softmax(self.model(Xbatch), dim=-1).data.cpu().numpy()
                probas.append(vals)
        return np.concatenate(probas, axis=0)
"""
MLP with Pytorch (nhid=0 --> Logistic Regression)
"""
class MLP(PyTorchClassifier):
    """
    MLP with Pytorch (nhid=0 --> Logistic Regression).

    PARAMETERS (all optional keys of `params`):
    -nhid: number of hidden units (0: Logistic Regression)
    -optim: optimizer ("sgd,lr=0.1", "adam", "rmsprop" ..)
    -tenacity: how many times dev acc does not increase before stopping
    -epoch_size: each epoch corresponds to epoch_size pass on the train set
    -max_epoch: max number of epoches
    -dropout: dropout for MLP
    """

    def __init__(self, params, inputdim, nclasses, l2reg=0., batch_size=64,
                 seed=1111, cudaEfficient=False):
        # FIX: super(self.__class__, ...) recurses infinitely if MLP is ever
        # subclassed; name the class explicitly.
        super(MLP, self).__init__(inputdim, nclasses, l2reg,
                                  batch_size, seed, cudaEfficient)

        self.nhid = 0 if "nhid" not in params else params["nhid"]
        self.optim = "adam" if "optim" not in params else params["optim"]
        self.tenacity = 5 if "tenacity" not in params else params["tenacity"]
        self.epoch_size = 4 if "epoch_size" not in params else params["epoch_size"]
        self.max_epoch = 200 if "max_epoch" not in params else params["max_epoch"]
        self.dropout = 0. if "dropout" not in params else params["dropout"]
        self.batch_size = 64 if "batch_size" not in params else params["batch_size"]

        # FIX: the original read params["nhid"] directly here (and in the
        # Linear layers), raising KeyError whenever "nhid" was omitted
        # despite the self.nhid default computed above.
        if self.nhid == 0:
            # no hidden layer: plain (multinomial) logistic regression
            self.model = nn.Sequential(
                nn.Linear(self.inputdim, self.nclasses),
            ).cuda()
        else:
            self.model = nn.Sequential(
                nn.Linear(self.inputdim, self.nhid),
                nn.Dropout(p=self.dropout),
                nn.Sigmoid(),
                nn.Linear(self.nhid, self.nclasses),
            ).cuda()

        self.loss_fn = nn.CrossEntropyLoss().cuda()
        # NOTE(review): size_average is deprecated in recent PyTorch;
        # presumably intended to sum (not average) the batch loss -- confirm.
        self.loss_fn.size_average = False

        optim_fn, optim_params = utils.get_optimizer(self.optim)
        self.optimizer = optim_fn(self.model.parameters(), **optim_params)
        self.optimizer.param_groups[0]['weight_decay'] = self.l2reg
| 7,737 | 37.118227 | 94 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.