keyword stringclasses 7 values | repo_name stringlengths 8 98 | file_path stringlengths 4 244 | file_extension stringclasses 29 values | file_size int64 0 84.1M | line_count int64 0 1.6M | content stringlengths 1 84.1M ⌀ | language stringclasses 14 values |
|---|---|---|---|---|---|---|---|
3D | marcdcfischer/PUNet | src/modules/instruction_model_simple.py | .py | 32,549 | 488 | from argparse import ArgumentParser, Namespace
import pytorch_lightning as pl
import torch
import torch.distributed as dist
from torch import nn
from torch.optim import AdamW
import torch.optim.lr_scheduler as lr_scheduler
from typing import Optional, Any, Dict, Union, List, Tuple
import pathlib as plb
from src.modules.architectures.momentum_model_simple import MomentumModelSimple
from src.modules.losses.contrastive_protos_teacher import ContrastiveProtosTeacherLoss
from src.utils.plotting import image_grid, similarities_student_teacher
from src.modules.losses import focal
import wandb
import monai
import monai.inferers as minferers
from monai.data import decollate_batch
import einops
import torch.nn.functional as F
import itertools
from functools import partial
import numpy as np
from src.data.transforms_monai import generate_test_post_transforms
import nibabel as nib
import warnings
# This module is used for the baseline architectures (available through monai)
class InstructionModelSimple(pl.LightningModule):
def __init__(self, conf: Union[Dict, Namespace], **kwargs):
    """Baseline instruction model wrapping a momentum student/teacher architecture.

    Args:
        conf: Hyperparameter configuration (dict or argparse ``Namespace``);
            persisted via ``save_hyperparameters`` and read back as ``self.hparams``.
        **kwargs: Ignored; accepted for Lightning checkpoint/CLI compatibility.
    """
    super().__init__()
    self.save_hyperparameters(conf)
    print(f'Establishing architecture with parameters: \n {self.hparams}')
    # Architecture: momentum student/teacher pair of the simple baseline networks.
    self.architecture = MomentumModelSimple(conf=self.hparams)
    # Losses
    # Segmentation loss (focal). Downstream runs may use a different background
    # alpha than pre-training (see add_model_specific_args).
    self.loss_seg = focal.FocalLoss(self.hparams.out_channels,
                                    loss_weight=self.hparams.loss_weight_segmentation,
                                    gamma=self.hparams.loss_weight_segmentation_gamma,
                                    alpha_background=self.hparams.loss_weight_segmentation_alpha_background if not self.hparams.downstream else self.hparams.loss_weight_segmentation_alpha_background_downstream,
                                    alpha_foreground=self.hparams.loss_weight_segmentation_alpha_foreground,
                                    additive_alpha=self.hparams.additive_alpha)
    # Contrastive losses
    # Account for half res of wip architecture: non-"wip_simple" baselines run at
    # half resolution, so both reduction factors are doubled.
    reduction_factor_ = self.hparams.reduction_factor if self.hparams.architecture == "wip_simple" else self.hparams.reduction_factor * 2
    reduction_factor_protos_ = self.hparams.reduction_factor_protos if self.hparams.architecture == "wip_simple" else self.hparams.reduction_factor_protos * 2
    print(f'Using (adjusted) reduction factor: {reduction_factor_} and reduction_factor_protos: {reduction_factor_protos_}.')
    self.loss_cluster_pairs = ContrastiveProtosTeacherLoss(
        reduction_factor=reduction_factor_,
        reduction_factor_protos=reduction_factor_protos_,
        loss_weight=self.hparams.loss_weight_sim_protos if not self.hparams.downstream else self.hparams.loss_weight_sim_protos_downstream,
        k_means_iterations=self.hparams.k_means_iterations,
        use_weighting_protos=self.hparams.use_weighting_protos,
        use_weighting_teacher=self.hparams.use_weighting_teacher,
        fwhm_student_teacher=self.hparams.fwhm_student_teacher,
        fwhm_teacher_protos=self.hparams.fwhm_teacher_protos,
    )
    # Metrics: one Dice tracker per phase, plus annotated / non-annotated splits
    # for training batches.
    self.score_seg_train = monai.metrics.DiceMetric(include_background=True, reduction='mean_batch')
    self.score_seg_train_annotated = monai.metrics.DiceMetric(include_background=True, reduction='mean_batch')
    self.score_seg_train_non_annotated = monai.metrics.DiceMetric(include_background=True, reduction='mean_batch')
    self.score_seg_val = monai.metrics.DiceMetric(include_background=True, reduction='mean_batch')
    self.score_seg_test = monai.metrics.DiceMetric(include_background=True, reduction='mean_batch')
def forward(self,
            x: List[torch.Tensor],
            x_teacher: Optional[torch.Tensor] = None,
            teacher_prediction: bool = True,
            second_student_prediction: bool = True):
    """Run the student branches (and optionally the teacher) on the given inputs.

    When ``teacher_prediction`` is False the teacher input is dropped; when
    ``second_student_prediction`` is False only the first student input is used.
    Returns the ``(students, teacher)`` output dicts of the architecture.
    """
    if not teacher_prediction:
        x_teacher = None
    if not second_student_prediction:
        x = x[:1]
    return self.architecture(x, x_teacher)
def forward_prediction(self, x: torch.Tensor):
    """Single-input predictor used by monai's sliding-window inference.

    Takes one image tensor and returns the dense logits of the first student.
    """
    outputs_students, _ = self.architecture([x], None)
    first_student = outputs_students[0]
    return first_student['dense']['embedded_latents']
def training_step(self, batch: Any, batch_idx: int, dataloader_idx: Optional[int] = None) -> Any:
    """One training step: student/teacher forward, segmentation + contrastive losses,
    Dice metric updates, and (optionally) plot generation / wandb upload.

    Returns:
        Dict with the summed loss under key ``'loss'`` (Lightning convention).
    """
    if batch_idx == 0:
        torch.cuda.empty_cache()  # helps with fragmentation
    if self.current_epoch == 0:
        print(f'Training with optimizer(s) {self.optimizers()}')
    if self.hparams.downstream and self.lr_schedulers() is not None:
        print(f'Training epoch {self.current_epoch} with step size(s) {self.lr_schedulers().get_last_lr()}')
        for idx_step_size, step_size_ in enumerate(self.lr_schedulers().get_last_lr()):
            self.log(f'train_step_size_{idx_step_size}', step_size_, sync_dist=True)
    # batch selection
    if self.hparams.orientation_swap:
        batch = batch[batch_idx % len(batch)]  # take only one of the loaded batches per batch_idx
    else:
        batch = batch[0]
    # Batch preparations
    # Aux content for one of the categories (student large) only
    aux_names = list(itertools.chain(*list(zip(*[batch[x_] for x_ in batch.keys() if 'name' in str(x_)]))))  # custom collate on auxiliary keys, since monai can only concat arrays / tensors.
    aux_frames = list(itertools.chain(*list(zip(*[batch[x_] for x_ in batch.keys() if 'frame' in str(x_)]))))
    aux_domains = list(itertools.chain(*list(zip(*[batch[x_] for x_ in batch.keys() if 'domain' in str(x_)]))))
    aux_annotated = torch.stack(list(itertools.chain(*list(zip(*[batch[x_] for x_ in batch.keys() if 'annotated' in str(x_)])))))  # custom collate on auxiliary keys, since monai can only concat arrays / tensors.
    # Image content: fold the transform dimension into the batch dimension.
    x_students = [einops.rearrange(batch['image'], 'b (t c) d h w -> (b t) c d h w', t=self.hparams.num_transforms)]  # Concatenate differently transformed elements in batch dim
    y_students = [einops.rearrange(batch['label'], 'b (t c) d h w -> (b t) c d h w', t=self.hparams.num_transforms).float().round().long()[:, 0, ...]]
    for idx_student in range(self.hparams.n_students - 1):
        x_students.append(einops.rearrange(batch[f'image_var{idx_student}'], 'b (t c) d h w -> (b t) c d h w', t=self.hparams.num_transforms))
        y_students.append(einops.rearrange(batch[f'label_var{idx_student}'], 'b (t c) d h w -> (b t) c d h w', t=self.hparams.num_transforms).float().round().long()[:, 0, ...])
    x_teacher = einops.rearrange(batch['image_teacher'], 'b (t c) d h w -> (b t) c d h w', t=self.hparams.num_transforms)
    y_teacher = einops.rearrange(batch['label_teacher'], 'b (t c) d h w -> (b t) c d h w', t=self.hparams.num_transforms).float().round().long()[:, 0, ...]
    y_students_one_hot = [einops.rearrange(F.one_hot(x_, num_classes=self.hparams.out_channels), 'b h w d c -> b c h w d') for x_ in y_students]
    y_teacher_one_hot = einops.rearrange(F.one_hot(y_teacher, num_classes=self.hparams.out_channels), 'b h w d c -> b c h w d')
    # Optional coordinate grids (used by the contrastive loss for spatial matching).
    coord_grids_students = None
    coord_grids_teacher = None
    if 'coord_grid' in batch.keys():
        coord_grids_students = [einops.rearrange(batch['coord_grid'], 'b (t c) d h w -> (b t) c d h w', t=self.hparams.num_transforms)]
        for idx_student in range(self.hparams.n_students - 1):
            coord_grids_students.append(einops.rearrange(batch[f'coord_grid_var{idx_student}'], 'b (t c) d h w -> (b t) c d h w', t=self.hparams.num_transforms))
        coord_grids_teacher = einops.rearrange(batch['coord_grid_teacher'], 'b (t c) d h w -> (b t) c d h w', t=self.hparams.num_transforms)
    # Momentum update of the teacher weights before prediction.
    self.architecture.update_teacher()
    # prediction — teacher / second student only needed when the contrastive loss is active.
    dict_out_students, dict_out_teacher = self(x_students,
                                               x_teacher,
                                               teacher_prediction=self.loss_cluster_pairs.loss_weight > 0.,
                                               second_student_prediction=self.loss_cluster_pairs.loss_weight > 0.)
    # Loss calculation
    loss_dict, plots_dict = dict(), dict()
    # Segmentation: only on annotated elements of the batch.
    if self.hparams.loss_weight_segmentation > 0.:
        for idx_student in range(len(dict_out_students)):
            loss_dict.update(self.loss_seg(dict_out_students[idx_student]['dense']['embedded_latents'][aux_annotated, ...], y_students[idx_student][aux_annotated, ...], tag=f'seg_s{idx_student}'))
    else:
        loss_dict['seg'] = torch.tensor(0., device=self.device, requires_grad=False)
    # Self-supervised contrastive
    if self.hparams.contrastive_pairs:
        if self.loss_cluster_pairs.loss_weight > 0.:
            loss_tmp_, plots_tmp_ = self.loss_cluster_pairs(embeddings_students=[x_['patched']['embedded_latents'] for x_ in dict_out_students],
                                                            embeddings_teacher=dict_out_teacher['patched']['embedded_latents'],
                                                            frames=aux_frames,
                                                            coord_grids_students=coord_grids_students,
                                                            coord_grids_teacher=coord_grids_teacher)
            loss_dict.update(loss_tmp_), plots_dict.update(plots_tmp_)  # two updates on one line (throwaway tuple)
        else:
            # Keep loss keys present (as zeros) so logging stays consistent.
            for idx_student in range(len(dict_out_students)):
                loss_dict[f'contrastive_proxy_sim_clustered_s{idx_student}'] = torch.tensor(0., device=self.device, requires_grad=False)
    loss_dict['all'] = sum(loss_dict.values())
    with torch.no_grad():
        # metrics: Dice over all / annotated / non-annotated batch elements.
        for idx_student in range(len(dict_out_students)):
            predictions_one_hot_ = F.one_hot(dict_out_students[idx_student]['dense']['embedded_latents'].argmax(dim=1).long(), num_classes=self.hparams.out_channels).permute(0, 4, 1, 2, 3)
            self.score_seg_train(y_pred=predictions_one_hot_,
                                 y=y_students_one_hot[idx_student])
            if any(aux_annotated):
                self.score_seg_train_annotated(y_pred=predictions_one_hot_[aux_annotated, ...],
                                               y=y_students_one_hot[idx_student][aux_annotated, ...])
            if any(~aux_annotated):
                self.score_seg_train_non_annotated(y_pred=predictions_one_hot_[~aux_annotated, ...],
                                                   y=y_students_one_hot[idx_student][~aux_annotated, ...])
        # logging
        for key_, value_ in loss_dict.items():
            self.log(f'train_loss_{key_}', value_.detach().cpu())
        # plotting: throttled by epoch interval and batch index; rank 0 only.
        if self.hparams.plot and (self.current_epoch + 1) % self.hparams.plot_interval_train == 0 and batch_idx % 10 == 0 and batch_idx <= 20:
            if not dist.is_initialized() or dist.get_rank() < 1:
                # Fetch valid elements (if sim of plots has only been calculated for valid elements)
                valid_entries = np.array(aux_frames) == np.unique(aux_frames)[0]
                png_paths = list()
                for idx_student in range(len(dict_out_students)):
                    png_paths.extend(similarities_student_teacher.visualize_similarities_student_teacher({k_: v_ for k_, v_ in plots_dict.items() if f'{idx_student}' in k_}, x_students[idx_student], x_teacher,
                                                                                                         prefix=f'{self.hparams.run_name}_train_b{batch_idx}_s{idx_student}_ep{str(self.current_epoch).zfill(3)}', path_plots=self.hparams.default_root_dir))
                    for identifier_, dict_, x_, y_one_hot_, in zip(['student', 'teacher'], [dict_out_students[idx_student], dict_out_teacher], [x_students[idx_student], x_teacher], [y_students_one_hot[idx_student], y_teacher_one_hot]):
                        if dict_ is not None:  # teacher output may be absent when the contrastive loss is off
                            png_paths.extend(image_grid.plot_grid_middle(x_.detach().cpu(),
                                                                         y_one_hot_.detach().cpu(),
                                                                         torch.softmax(dict_['dense']['embedded_latents'].detach().cpu().float(), dim=1),
                                                                         None,
                                                                         indices_elements=[idx_ * 2 for idx_ in range(max(min(x_students[0].shape[0] // 2, 2), 1))],
                                                                         prefix=f'{self.hparams.run_name}_train_b{batch_idx}_s{idx_student}_ep{str(self.current_epoch).zfill(3)}_{identifier_}', path_plots=self.hparams.default_root_dir))
                if self.hparams.online_on:
                    [self.logger[1].experiment.log({str(plb.Path(png_).stem[:-6]): wandb.Image(png_)}) for png_ in png_paths]
    return {'loss': loss_dict['all']}
def training_epoch_end(self, outputs) -> None:
    """Aggregate and log mean training loss and Dice scores at epoch end.

    Fixes over the previous version:
    - The annotated / non-annotated Dice trackers are now aggregated and reset
      as well; previously they accumulated state across epochs in
      ``training_step`` without ever being read or cleared.
    - The bare ``except: pass`` is replaced by a narrow ``except Exception``
      that warns instead of silently swallowing every error (including
      ``KeyboardInterrupt``); the trackers are reset in ``finally`` so a
      failed aggregation cannot poison the next epoch.
    """
    self.log(f'train_loss_mean', torch.mean(torch.tensor([x_['loss'] for x_ in outputs], dtype=torch.float)), sync_dist=True)
    for scorer_, tag_ in zip([self.score_seg_train, self.score_seg_train_annotated, self.score_seg_train_non_annotated],
                             ['all', 'annotated', 'non_annotated']):
        try:
            score_seg = scorer_.aggregate()
        except Exception as err:  # e.g. no (non-)annotated samples were seen this epoch
            warnings.warn(f'Could not aggregate train dice ({tag_}): {err}')
        else:
            self.log_dict({f'train_dice_{tag_}_c{str(idx_c).zfill(2)}': score_seg[idx_c] for idx_c in range(len(score_seg))}, sync_dist=True)
            self.log(f'train_dice_{tag_}_mean', torch.mean(score_seg[1:]), sync_dist=True)
        finally:
            scorer_.reset()  # always clear accumulated state for the next epoch
def validation_step(self, batch: Any, batch_idx: int, dataloader_idx: Optional[int] = None) -> Any:
    """One validation step: sliding-window inference over the full volume,
    segmentation loss on annotated elements, Dice update, optional plotting.

    Returns:
        Dict with the averaged loss under key ``'loss'``.
    """
    if batch_idx == 0:
        torch.cuda.empty_cache()  # helps with fragmentation
    # batch preparations
    aux_names = list(itertools.chain(*list(zip(*[batch[x_] for x_ in batch.keys() if 'name' in x_]))))  # custom collate on auxiliary keys, since monai can only concat arrays / tensors.
    aux_frames = list(itertools.chain(*list(zip(*[batch[x_] for x_ in batch.keys() if 'frame' in x_]))))
    aux_domains = list(itertools.chain(*list(zip(*[batch[x_] for x_ in batch.keys() if 'domain' in x_]))))
    aux_annotated = torch.stack(list(itertools.chain(*list(zip(*[batch[x_] for x_ in batch.keys() if 'annotated' in x_])))))  # custom collate on auxiliary keys, since monai can only concat arrays / tensors.
    x = batch['image']
    y = batch['label'].float().round().long()[:, 0, ...]
    y_one_hot = einops.rearrange(F.one_hot(y, num_classes=self.hparams.out_channels), 'b h w d c -> b c h w d')
    scribbles, scribbles_masked = None, None
    if 'scribbles' in batch.keys():
        scribbles = batch['scribbles'][:, 0, ...]
        scribbles_masked = scribbles.clone()
        scribbles_masked[~aux_annotated, ...] = self.hparams.out_channels  # Mask non-annotated
    coord_grids = None
    if 'coord_grid' in batch.keys():
        coord_grids = batch['coord_grid']
    # Activate respective instructions
    loss_dict_all = list()
    # Sliding Window Inference - by teacher
    volume_prediction = minferers.sliding_window_inference(inputs=x,
                                                           roi_size=self.hparams.patch_size_students[0],
                                                           sw_batch_size=self.hparams.batch_size,
                                                           predictor=partial(self.forward_prediction),
                                                           overlap=self.hparams.sliding_window_overlap)
    # Loss calculation
    loss_dict, plots_dict = dict(), dict()
    # Segmentation (only when at least one annotated element is present)
    if any(aux_annotated) and self.hparams.loss_weight_segmentation > 0.:
        loss_dict.update(self.loss_seg(volume_prediction[aux_annotated, ...], y[aux_annotated, ...]))
    else:
        loss_dict['seg'] = torch.tensor(0., device=self.device, requires_grad=False)
    # Self-supervised contrastive not available in the current setup, since patched embeddings would be required
    loss_dict['all'] = sum(loss_dict.values())
    loss_dict_all.append(loss_dict)
    with torch.no_grad():
        # metrics
        predictions_one_hot = F.one_hot(volume_prediction.argmax(dim=1).long(), num_classes=self.hparams.out_channels).permute(0, 4, 1, 2, 3)
        self.score_seg_val(y_pred=predictions_one_hot,
                           y=y_one_hot)
        # plotting: throttled by epoch interval and batch index; rank 0 only.
        if self.hparams.plot and (self.current_epoch + 1) % self.hparams.plot_interval_val == 0 and batch_idx % 10 == 0 and batch_idx <= 20:
            if not dist.is_initialized() or dist.get_rank() < 1:
                png_paths = list()
                png_paths.extend(image_grid.plot_grid_middle(x.detach().cpu(), y_one_hot.detach().cpu(),
                                                             torch.softmax(volume_prediction.detach().cpu(), dim=1),
                                                             scribbles.detach().cpu() if scribbles is not None else None,
                                                             indices_elements=[idx_ * 2 for idx_ in range(max(min(x.shape[0] // 2, 2), 1))],
                                                             prefix=f'{self.hparams.run_name}_val_b{batch_idx}_s0_c-all_ep{str(self.current_epoch).zfill(3)}', path_plots=self.hparams.default_root_dir))
                if self.hparams.online_on:
                    [self.logger[1].experiment.log({str(plb.Path(png_).stem[:-6]): wandb.Image(png_)}) for png_ in png_paths]
    # Average each loss key over the collected dicts (currently a single entry).
    loss_dict_all = {k_: sum(dict_[k_] for dict_ in loss_dict_all) / len(loss_dict_all) for k_ in loss_dict_all[0].keys()}
    with torch.no_grad():
        # logging
        self.log(f'val_epoch', self.current_epoch, sync_dist=True)  # log for ckpt selection
        for key_, value_ in loss_dict_all.items():
            self.log(f'val_loss_{key_}', value_, sync_dist=True)  # logs per epoch
    return {'loss': loss_dict_all['all']}
def validation_epoch_end(self, outputs) -> None:
    """Log mean validation loss and per-class / mean foreground Dice."""
    losses = torch.tensor([step_out['loss'] for step_out in outputs], dtype=torch.float)
    self.log('val_loss_mean', torch.mean(losses), sync_dist=True)
    score_seg = self.score_seg_val.aggregate()
    self.score_seg_val.reset()
    per_class_scores = {f'val_dice_c{str(idx_c).zfill(2)}': score_seg[idx_c] for idx_c in range(len(score_seg))}
    self.log_dict(per_class_scores, sync_dist=True)
    # Mean over all but the first (background) channel.
    self.log('val_dice_mean', torch.mean(score_seg[1:]), sync_dist=True)
def test_step(self, batch: Any, batch_idx: int, dataloader_idx: Optional[int] = None) -> Any:
    """One test step: sliding-window inference, loss, Dice update, prediction
    export via monai post-transforms, and optional plotting.

    Returns:
        Dict with the averaged loss under key ``'loss'``.
    """
    if batch_idx == 0:
        torch.cuda.empty_cache()  # helps with fragmentation
    # batch preparations
    aux_names = list(itertools.chain(*list(zip(*[batch[x_] for x_ in batch.keys() if 'name' in x_]))))  # custom collate on auxiliary keys, since monai can only concat arrays / tensors.
    aux_frames = list(itertools.chain(*list(zip(*[batch[x_] for x_ in batch.keys() if 'frame' in x_]))))
    aux_domains = list(itertools.chain(*list(zip(*[batch[x_] for x_ in batch.keys() if 'domain' in x_]))))
    aux_annotated = torch.stack(list(itertools.chain(*list(zip(*[batch[x_] for x_ in batch.keys() if 'annotated' in x_])))))  # custom collate on auxiliary keys, since monai can only concat arrays / tensors.
    x = batch['image']
    y = batch['label'].float().round().long()[:, 0, ...]
    y_one_hot = einops.rearrange(F.one_hot(y, num_classes=self.hparams.out_channels), 'b h w d c -> b c h w d')
    scribbles, scribbles_masked = None, None
    if 'scribbles' in batch.keys():
        scribbles = batch['scribbles'][:, 0, ...]
        scribbles_masked = scribbles.clone()
        scribbles_masked[~aux_annotated, ...] = self.hparams.out_channels  # Mask non-annotated
    coord_grids = None
    if 'coord_grid' in batch.keys():
        coord_grids = batch['coord_grid']
    # Activate respective instructions
    loss_dict_all = list()
    # Sliding Window Inference - by teacher
    volume_prediction = minferers.sliding_window_inference(inputs=x,
                                                           roi_size=self.hparams.patch_size_students[0],
                                                           sw_batch_size=self.hparams.batch_size,
                                                           predictor=partial(self.forward_prediction),
                                                           overlap=self.hparams.sliding_window_overlap)
    # Loss calculation
    loss_dict, plots_dict = dict(), dict()
    # Segmentation (only when at least one annotated element is present)
    if any(aux_annotated) and self.hparams.loss_weight_segmentation > 0.:
        loss_dict.update(self.loss_seg(volume_prediction[aux_annotated, ...], y[aux_annotated, ...]))
    else:
        loss_dict['seg'] = torch.tensor(0., device=self.device, requires_grad=False)
    # Self-supervised contrastive not available in the current setup, since patched embeddings would be required
    loss_dict['all'] = sum(loss_dict.values())
    loss_dict_all.append(loss_dict)
    with torch.no_grad():
        # metrics
        predictions_one_hot = F.one_hot(volume_prediction.argmax(dim=1).long(), num_classes=self.hparams.out_channels).permute(0, 4, 1, 2, 3)
        self.score_seg_test(y_pred=predictions_one_hot,
                            y=y_one_hot)
        # save predictions
        test_viz = False  # manual debug switch: interactive ortho-slice viewer
        if test_viz:
            import matplotlib
            matplotlib.use('tkagg')
            argmaxed = torch.argmax(volume_prediction[0, ...], dim=0)
            viewer = nib.viewers.OrthoSlicer3D(np.array((argmaxed / argmaxed.max()).detach().cpu()))
            viewer.clim = [0.0, 1.0]
            viewer.show()
        # Export predictions to disk via the inverse test transforms.
        post_transform = generate_test_post_transforms(output_dir=self.hparams.export_dir,
                                                       output_postfix=f'pred_cat-all',
                                                       transform_test=self.trainer.datamodule.transform_test,
                                                       n_classes=None)
        batch['pred'] = volume_prediction
        [post_transform(x_) for x_ in decollate_batch(batch)]
        # plotting: throttled by epoch interval and batch index; rank 0 only.
        if self.hparams.plot and (self.current_epoch + 1) % self.hparams.plot_interval_test == 0 and batch_idx % 10 == 0 and batch_idx <= 20:
            if not dist.is_initialized() or dist.get_rank() < 1:
                png_paths = list()
                png_paths.extend(image_grid.plot_grid_middle(x.detach().cpu(), y_one_hot.detach().cpu(),
                                                             torch.softmax(volume_prediction.detach().cpu(), dim=1),
                                                             scribbles.detach().cpu() if scribbles is not None else None,
                                                             indices_elements=[idx_ * 2 for idx_ in range(max(min(x.shape[0] // 2, 2), 1))],
                                                             prefix=f'{self.hparams.run_name}_test_b{batch_idx}_s0_c-all_ep{str(self.current_epoch).zfill(3)}', path_plots=self.hparams.default_root_dir))
                if self.hparams.online_on:
                    [self.logger[1].experiment.log({str(plb.Path(png_).stem[:-6]): wandb.Image(png_)}) for png_ in png_paths]
    # Average each loss key over the collected dicts (currently a single entry).
    loss_dict_all = {k_: sum(dict_[k_] for dict_ in loss_dict_all) / len(loss_dict_all) for k_ in loss_dict_all[0].keys()}
    with torch.no_grad():
        # logging
        self.log(f'test_epoch', self.current_epoch, sync_dist=True)  # log for ckpt selection
        for key_, value_ in loss_dict_all.items():
            self.log(f'test_loss_{key_}', value_, sync_dist=True)  # logs per epoch
    return {'loss': loss_dict_all['all']}
def test_epoch_end(self, outputs) -> None:
    """Log mean test loss and per-class / mean foreground Dice."""
    losses = torch.tensor([step_out['loss'] for step_out in outputs], dtype=torch.float)
    self.log('test_loss_mean', torch.mean(losses), sync_dist=True)
    score_seg = self.score_seg_test.aggregate()
    self.score_seg_test.reset()
    per_class_scores = {f'test_dice_c{str(idx_c).zfill(2)}': score_seg[idx_c] for idx_c in range(len(score_seg))}
    self.log_dict(per_class_scores, sync_dist=True)
    # Mean over all but the first (background) channel.
    self.log('test_dice_mean', torch.mean(score_seg[1:]), sync_dist=True)
def predict_step(self, batch: Any, batch_idx: int, dataloader_idx: Optional[int] = None) -> Any:
    """Prediction is not implemented for this baseline module; no-op (returns None)."""
    pass
def configure_optimizers(self):
    """Configure AdamW (separate instruction-parameter lr for "wip_simple") and,
    for downstream runs, an optional OneCycleLR schedule stepped once per epoch.

    Returns:
        Either the bare optimizer, or ``([optimizer], [scheduler])`` when the
        downstream one-cycle schedule is active.
    """
    if self.hparams.architecture == "wip_simple":
        # Two parameter groups: network body, and instruction parameters with their own lr.
        optimizer = AdamW([{'params': [param_ for name_, param_ in self.architecture.network_student.get_named_body_parameters()]},
                           {'params': [param_ for name_, param_ in self.architecture.network_student.get_named_instruction_parameters()],
                            'lr': self.hparams.learning_rate_instructions if not self.hparams.downstream else self.hparams.learning_rate_instructions_downstream}],
                          lr=self.hparams.learning_rate if not self.hparams.downstream else self.hparams.learning_rate_downstream,
                          weight_decay=self.hparams.weight_decay if not self.hparams.downstream else self.hparams.weight_decay_downstream)
    else:
        optimizer = AdamW(self.parameters(),
                          lr=self.hparams.learning_rate if not self.hparams.downstream else self.hparams.learning_rate_downstream,
                          weight_decay=self.hparams.weight_decay if not self.hparams.downstream else self.hparams.weight_decay_downstream)
    if self.hparams.downstream and self.hparams.with_scheduler_downstream:
        assert self.hparams.max_epochs is not None
        print(f'Using one cycle lr scheduler for {self.hparams.max_epochs} epochs and {1} steps per epoch.')
        scheduler = lr_scheduler.OneCycleLR(optimizer,
                                            max_lr=[self.hparams.learning_rate_downstream, self.hparams.learning_rate_instructions_downstream] if self.hparams.architecture == "wip_simple" else self.hparams.learning_rate_downstream,
                                            total_steps=None,
                                            epochs=self.hparams.max_epochs,
                                            steps_per_epoch=1,  # The amount of scheduler.step() performed in an epoch. Probably defaults to 1 for lightning.
                                            pct_start=0.1,
                                            anneal_strategy='cos',
                                            cycle_momentum=True,
                                            base_momentum=0.85,
                                            max_momentum=0.95,
                                            div_factor=20,  # initial_lr = max_lr / div_factor
                                            final_div_factor=1,  # min_lr = initial_lr / final_div_factor
                                            three_phase=False,
                                            last_epoch=- 1,
                                            verbose=False)
        return [optimizer], [scheduler]
    return optimizer
def on_load_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
    """Make checkpoint loading tolerant to architecture changes.

    Parameters whose shape no longer matches are replaced by the current
    model's (freshly initialized) tensors; parameters absent from the current
    model are dropped. If anything was modified, the stored optimizer states
    are discarded because they no longer correspond to the parameters.
    """
    state_dict = checkpoint["state_dict"].copy()  # iterate over a copy so deletion below is safe
    model_state_dict = self.state_dict()
    is_changed = False
    for k in state_dict:
        if k in model_state_dict:
            # Adjust parameters with size mismatch
            if state_dict[k].shape != model_state_dict[k].shape:
                print(f"Skip loading parameter: {k}, "
                      f"required shape: {model_state_dict[k].shape}, "
                      f"loaded shape: {state_dict[k].shape}")
                checkpoint["state_dict"][k] = model_state_dict[k]
                is_changed = True
        else:
            # Remove parameters not in the actual model
            warnings.warn(f"Dropping parameter: {k}")
            del checkpoint["state_dict"][k]
            is_changed = True
    if is_changed:
        # Stale optimizer state would mismatch the (partially reset) parameters.
        checkpoint.pop("optimizer_states", None)
def on_save_checkpoint(self, checkpoint):
    """Announce the (externally handled) checkpoint upload when S3 export and
    online logging are both enabled."""
    if not (self.hparams.s3_bucket and self.hparams.online_on):
        return
    print(f'\rUploading checkpoint to {self.hparams.ckpt_dir} ...')
@staticmethod
def add_model_specific_args(parent_parser):
    """Append model-specific CLI arguments to ``parent_parser``.

    Fix: the previous version used ``type=bool`` for flag-like options, which
    is broken in argparse — ``bool('False')`` is ``True``, so any non-empty
    string (including "False"/"0") parsed as True. A proper string-to-bool
    converter is used instead; defaults are unchanged, so existing invocations
    behave identically.
    """
    def _str2bool(value) -> bool:
        # argparse applies `type` to the raw string; map common spellings explicitly.
        if isinstance(value, bool):
            return value
        if value.lower() in ('true', '1', 'yes', 'y'):
            return True
        if value.lower() in ('false', '0', 'no', 'n'):
            return False
        raise ValueError(f'Expected a boolean value, got {value!r}.')

    parser = ArgumentParser(parents=[parent_parser], add_help=False)
    parser.add_argument('--learning_rate', default=1e-4, type=float)
    parser.add_argument('--learning_rate_downstream', default=5e-4, type=float)
    parser.add_argument('--learning_rate_instructions', default=1e-3, type=float)
    parser.add_argument('--learning_rate_instructions_downstream', default=5e-3, type=float)
    parser.add_argument('--weight_decay', default=1e-2, type=float)
    parser.add_argument('--weight_decay_downstream', default=0, type=float)
    parser.add_argument('--with_scheduler_downstream', default=True, type=_str2bool)
    parser.add_argument('--sliding_window_overlap', default=0.5, type=float)
    # Segmentation loss
    parser.add_argument('--loss_weight_segmentation', default=1e-0, type=float)
    parser.add_argument('--loss_weight_segmentation_gamma', default=4.0, type=float)
    parser.add_argument('--loss_weight_segmentation_alpha_background', default=1.0, type=float)
    parser.add_argument('--loss_weight_segmentation_alpha_background_downstream', default=1.0, type=float)  # May use higher value than during training, to avoid fast collapse of background into foreground.
    parser.add_argument('--loss_weight_segmentation_alpha_foreground', default=1.0, type=float)
    # Contrastive loss
    parser.add_argument('--contrastive_pairs', default=True, type=_str2bool)
    parser.add_argument('--loss_weight_sim_paired', default=0., type=float)
    parser.add_argument('--loss_weight_sim_protos', default=1e-2, type=float)
    parser.add_argument('--loss_weight_sim_protos_downstream', default=0, type=float)
    parser.add_argument('--loss_weight_sim_closest', default=0., type=float)
    parser.add_argument('--loss_weight_dissim_closest', default=0., type=float)
    parser.add_argument('--k_means_iterations', default=3, type=int)
    parser.add_argument('--reduction_factor', default=2., type=float)
    parser.add_argument('--reduction_factor_protos', default=8., type=float)
    parser.add_argument('--fwhm_student_teacher', default=128., type=float)
    parser.add_argument('--fwhm_teacher_protos', default=128., type=float)
    parser.add_argument('--use_weighting_protos', default=True, type=_str2bool)
    parser.add_argument('--use_weighting_teacher', default=False, type=_str2bool)
    # Misc
    parser.add_argument('--label_indices_max_active', default=-1, type=int)  # Should stay negative for simple case.
    parser.add_argument('--downstream', action='store_true')
    parser.add_argument('--label_indices_base', default=[], nargs='*', type=int)  # Does not have any effect atm.
    parser.add_argument('--label_indices_downstream_active', default=[], nargs='*', type=int)  # Does not have any effect atm.
    parser.add_argument('--separate_background', default=False, type=_str2bool)  # Should stay False for simple case.
    return parser
| Python |
3D | marcdcfischer/PUNet | src/modules/instruction_model.py | .py | 55,172 | 757 | from argparse import ArgumentParser, Namespace
import pytorch_lightning as pl
import torch
import torch.distributed as dist
from torch.optim import AdamW
import torch.optim.lr_scheduler as lr_scheduler
from typing import Optional, Any, Dict, Union, List, Tuple
import pathlib as plb
from src.modules.architectures.momentum_model import MomentumModel
from src.modules.losses.contrastive_protos_teacher import ContrastiveProtosTeacherLoss
from src.utils.plotting import image_grid, similarities_student_teacher
from src.modules.losses import focal
import wandb
import monai
import monai.inferers as minferers
from monai.data import decollate_batch
import einops
import torch.nn.functional as F
import itertools
from functools import partial
import numpy as np
from src.data.transforms_monai import generate_test_post_transforms
import nibabel as nib
import warnings
class InstructionModel(pl.LightningModule):
def __init__(self, conf: Union[Dict, Namespace], **kwargs):
    """Instruction model wrapping the full momentum student/teacher architecture.

    Args:
        conf: Hyperparameter configuration (dict or argparse ``Namespace``);
            persisted via ``save_hyperparameters`` and read back as ``self.hparams``.
        **kwargs: Ignored; accepted for Lightning checkpoint/CLI compatibility.
    """
    super().__init__()
    self.save_hyperparameters(conf)
    # Instruction / pseudo-label bookkeeping; populated later (e.g. per training phase).
    self.pseudo_indices_subject_idx = None
    self.pseudo_indices_label_idx = None
    self.label_indices_active = None
    self.label_indices_frozen = None
    self.mode_loss = None
    print(f'Establishing architecture with parameters: \n {self.hparams}')
    # Architecture: momentum student/teacher pair (full model variant).
    self.architecture = MomentumModel(conf=self.hparams)
    # Losses
    # Segmentation loss (focal). Downstream runs may use a different background alpha.
    self.loss_seg = focal.FocalLoss(self.hparams.out_channels,
                                    loss_weight=self.hparams.loss_weight_segmentation,
                                    gamma=self.hparams.loss_weight_segmentation_gamma,
                                    alpha_background=self.hparams.loss_weight_segmentation_alpha_background if not self.hparams.downstream else self.hparams.loss_weight_segmentation_alpha_background_downstream,
                                    alpha_foreground=self.hparams.loss_weight_segmentation_alpha_foreground,
                                    additive_alpha=self.hparams.additive_alpha)
    # Contrastive losses (no reduction-factor adjustment needed for this architecture).
    self.loss_cluster_pairs = ContrastiveProtosTeacherLoss(
        reduction_factor=self.hparams.reduction_factor,
        reduction_factor_protos=self.hparams.reduction_factor_protos,
        loss_weight=self.hparams.loss_weight_sim_protos if not self.hparams.downstream else self.hparams.loss_weight_sim_protos_downstream,
        k_means_iterations=self.hparams.k_means_iterations,
        use_weighting_protos=self.hparams.use_weighting_protos,
        use_weighting_teacher=self.hparams.use_weighting_teacher,
        fwhm_student_teacher=self.hparams.fwhm_student_teacher,
        fwhm_teacher_protos=self.hparams.fwhm_teacher_protos,
    )
    # Metrics: one Dice tracker per phase, plus annotated / non-annotated splits for training.
    self.score_seg_train = monai.metrics.DiceMetric(include_background=True, reduction='mean_batch')
    self.score_seg_train_annotated = monai.metrics.DiceMetric(include_background=True, reduction='mean_batch')
    self.score_seg_train_non_annotated = monai.metrics.DiceMetric(include_background=True, reduction='mean_batch')
    self.score_seg_val = monai.metrics.DiceMetric(include_background=True, reduction='mean_batch')
    self.score_seg_test = monai.metrics.DiceMetric(include_background=True, reduction='mean_batch')
def forward(self,
            x: List[torch.Tensor],
            x_teacher: Optional[torch.Tensor] = None,
            label_indices: Optional[torch.Tensor] = None,
            pseudo_indices_subject: Optional[torch.Tensor] = None,
            pseudo_indices_label: Optional[torch.Tensor] = None,
            mode_label: str = 'label',
            mode_loss: str = 'both',
            teacher_prediction: bool = True,
            second_student_prediction: bool = True):
    """Run student (and optionally teacher) branches with instruction indices.

    When ``teacher_prediction`` is False the teacher input is dropped; when
    ``second_student_prediction`` is False only the first student input is used.
    Returns the ``(students, teacher)`` output dicts of the architecture.
    """
    if not teacher_prediction:
        x_teacher = None
    if not second_student_prediction:
        x = x[:1]
    return self.architecture(x, x_teacher, label_indices, pseudo_indices_subject, pseudo_indices_label,
                             mode_label=mode_label, mode_loss=mode_loss)
def forward_prediction(self, x: torch.Tensor, label_indices: torch.Tensor):
    """Single-input predictor used by monai's sliding-window inference.

    Broadcasts the instruction indices across all sliding-window patches and
    returns the dense logits of the first student.
    """
    expanded_indices = label_indices.expand(x.shape[0], -1)  # one instruction row per patch
    outputs_students, _ = self.architecture([x], None, expanded_indices)
    return outputs_students[0]['dense']['embedded_latents']
def training_step(self, batch: Any, batch_idx: int, dataloader_idx: Optional[int] = None) -> Any:
    """One training iteration: prepare student/teacher views, sample active label indices,
    run the momentum architecture, and accumulate segmentation + contrastive losses.

    Returns a dict with the summed loss under ``'loss'`` (Lightning convention).
    """
    if batch_idx == 0:
        torch.cuda.empty_cache()  # helps with fragmentation
    if self.current_epoch == 0:
        print(f'Training with optimizer(s) {self.optimizers()}')
    if self.hparams.downstream and self.lr_schedulers() is not None:
        print(f'Training epoch {self.current_epoch} with step size(s) {self.lr_schedulers().get_last_lr()}')
        for idx_step_size, step_size_ in enumerate(self.lr_schedulers().get_last_lr()):
            self.log(f'train_step_size_{idx_step_size}', step_size_, sync_dist=True)
    # batch selection
    if self.hparams.orientation_swap:
        batch = batch[batch_idx % len(batch)]  # take only one of the loaded batches per batch_idx
    else:
        batch = batch[0]
    self.mode_loss = 'both'
    # Batch preparations
    # Aux content for one of the categories (student large) only
    aux_names = list(itertools.chain(*list(zip(*[batch[x_] for x_ in batch.keys() if 'name' in str(x_)]))))  # custom collate on auxiliary keys, since monai can only concat arrays / tensors.
    aux_frames = list(itertools.chain(*list(zip(*[batch[x_] for x_ in batch.keys() if 'frame' in str(x_)]))))
    aux_domains = list(itertools.chain(*list(zip(*[batch[x_] for x_ in batch.keys() if 'domain' in str(x_)]))))
    aux_annotated = torch.stack(list(itertools.chain(*list(zip(*[batch[x_] for x_ in batch.keys() if 'annotated' in str(x_)])))))  # custom collate on auxiliary keys, since monai can only concat arrays / tensors.
    # Image content
    # The channel dim packs num_transforms differently-augmented copies; unpack them into the batch dim.
    x_students = [einops.rearrange(batch['image'], 'b (t c) d h w -> (b t) c d h w', t=self.hparams.num_transforms)]  # Concatenate differently transformed elements in batch dim
    y_students = [einops.rearrange(batch['label'], 'b (t c) d h w -> (b t) c d h w', t=self.hparams.num_transforms).float().round().long()[:, 0, ...]]
    for idx_student in range(self.hparams.n_students - 1):
        x_students.append(einops.rearrange(batch[f'image_var{idx_student}'], 'b (t c) d h w -> (b t) c d h w', t=self.hparams.num_transforms))
        y_students.append(einops.rearrange(batch[f'label_var{idx_student}'], 'b (t c) d h w -> (b t) c d h w', t=self.hparams.num_transforms).float().round().long()[:, 0, ...])
    x_teacher = einops.rearrange(batch['image_teacher'], 'b (t c) d h w -> (b t) c d h w', t=self.hparams.num_transforms)
    y_teacher = einops.rearrange(batch['label_teacher'], 'b (t c) d h w -> (b t) c d h w', t=self.hparams.num_transforms).float().round().long()[:, 0, ...]
    y_students_one_hot = [einops.rearrange(F.one_hot(x_, num_classes=self.hparams.out_channels), 'b h w d c -> b c h w d') for x_ in y_students]
    y_teacher_one_hot = einops.rearrange(F.one_hot(y_teacher, num_classes=self.hparams.out_channels), 'b h w d c -> b c h w d')
    coord_grids_students = None
    coord_grids_teacher = None
    if 'coord_grid' in batch.keys():
        coord_grids_students = [einops.rearrange(batch['coord_grid'], 'b (t c) d h w -> (b t) c d h w', t=self.hparams.num_transforms)]
        for idx_student in range(self.hparams.n_students - 1):
            coord_grids_students.append(einops.rearrange(batch[f'coord_grid_var{idx_student}'], 'b (t c) d h w -> (b t) c d h w', t=self.hparams.num_transforms))
        coord_grids_teacher = einops.rearrange(batch['coord_grid_teacher'], 'b (t c) d h w -> (b t) c d h w', t=self.hparams.num_transforms)
    # Switch label indices (and thereby instructions)
    self.set_active_indices_training(batch_size=x_students[0].shape[0], device=y_students[0].device)
    self.set_frozen_indices(device=y_students[0].device)
    y_one_hot_gathered, y_gathered, y_one_hot_active = list(zip(*[self.gather_labels(targets_one_hot=x_) for x_ in y_students_one_hot]))
    y_teacher_one_hot_gathered, y_teacher_gathered, y_teacher_one_hot_active = self.gather_labels(targets_one_hot=y_teacher_one_hot)
    # Enable / disable learning for bulk of interpreter
    if self.hparams.selective_freezing:
        self.selective_freezing(batch_idx=batch_idx)
    # Momentum update
    self.architecture.update_teacher()
    # prediction
    # NOTE(review): self.pseudo_indices_subject_idx / self.pseudo_indices_label_idx are not set in this
    # chunk - presumably initialized elsewhere in the class; verify before refactoring.
    dict_out_students, dict_out_teacher = self(x_students,
                                               x_teacher,
                                               label_indices=self.label_indices_active,
                                               pseudo_indices_subject=self.pseudo_indices_subject_idx,
                                               pseudo_indices_label=self.pseudo_indices_label_idx,
                                               mode_label='label',
                                               mode_loss=self.mode_loss,
                                               teacher_prediction=self.loss_cluster_pairs.loss_weight > 0.,
                                               second_student_prediction=self.loss_cluster_pairs.loss_weight > 0.)
    # Loss calculation
    loss_dict, plots_dict = dict(), dict()
    # Segmentation
    for idx_student in range(len(dict_out_students)):
        if (self.mode_loss == 'seg' or self.mode_loss == 'both') and self.hparams.loss_weight_segmentation > 0.:  # any(aux_annotated):
            if not self.hparams.downstream:
                loss_dict.update(self.loss_seg(dict_out_students[idx_student]['dense']['embedded_latents'][aux_annotated, ...],
                                               y_gathered[idx_student][aux_annotated, ...],
                                               label_indices_active=self.label_indices_active,
                                               tag=f'seg_s{idx_student}'))
            else:
                # Downstream: aggregate frozen categories into background before computing the loss.
                dense_embedded_latents_scattered = self.scatter_predictions(dict_out_students[idx_student]['dense']['embedded_latents'])
                dense_embedded_latents_nonfrozen = self.gather_non_frozen_predictions(dense_embedded_latents_scattered)
                y_one_hot_non_frozen_, y_non_frozen_ = self.gather_non_frozen_labels(y_students_one_hot[idx_student])
                loss_dict.update(self.loss_seg(dense_embedded_latents_nonfrozen[aux_annotated, ...],
                                               y_non_frozen_[aux_annotated, ...],
                                               label_indices_active=self.label_indices_active,
                                               tag=f'seg_s{idx_student}'))
        else:
            # Keep the loss keys present (as zeros) so downstream logging stays uniform.
            loss_dict[f'seg_s{idx_student}'] = torch.tensor(0., device=self.device, requires_grad=False)
            loss_dict[f'seg_pseudo_s{idx_student}'] = torch.tensor(0., device=self.device, requires_grad=False)
    # Self-supervised contrastive
    if self.hparams.contrastive_pairs:
        if (self.mode_loss == 'self' or self.mode_loss == 'both') and self.loss_cluster_pairs.loss_weight > 0.:
            loss_tmp_, plots_tmp_ = self.loss_cluster_pairs(embeddings_students=[x_['patched']['embedded_latents'] for x_ in dict_out_students],
                                                            embeddings_teacher=dict_out_teacher['patched']['embedded_latents'],
                                                            frames=aux_frames,
                                                            coord_grids_students=coord_grids_students,
                                                            coord_grids_teacher=coord_grids_teacher)
            loss_dict.update(loss_tmp_), plots_dict.update(plots_tmp_)
        else:
            for idx_student in range(len(dict_out_students)):
                loss_dict[f'contrastive_proxy_sim_clustered_s{idx_student}'] = torch.tensor(0., device=self.device, requires_grad=False)
    loss_dict['all'] = sum(loss_dict.values())
    with torch.no_grad():
        # metrics
        for idx_student in range(len(dict_out_students)):
            dense_embedded_latents_scattered_ = self.scatter_predictions(dict_out_students[idx_student]['dense']['embedded_latents'])
            predictions_one_hot_ = F.one_hot(dense_embedded_latents_scattered_.argmax(dim=1).long(), num_classes=self.hparams.out_channels).permute(0, 4, 1, 2, 3)
            self.score_seg_train(y_pred=predictions_one_hot_,
                                 y=y_one_hot_active[idx_student])
            if any(aux_annotated):
                self.score_seg_train_annotated(y_pred=predictions_one_hot_[aux_annotated, ...],
                                               y=y_one_hot_active[idx_student][aux_annotated, ...])
            if any(~aux_annotated):
                self.score_seg_train_non_annotated(y_pred=predictions_one_hot_[~aux_annotated, ...],
                                                   y=y_one_hot_active[idx_student][~aux_annotated, ...])
        # logging
        for key_, value_ in loss_dict.items():
            self.log(f'train_loss_{key_}', value_.detach().cpu())
        # plotting
        if self.hparams.plot and (self.current_epoch + 1) % self.hparams.plot_interval_train == 0 and batch_idx % 10 == 0 and batch_idx <= 20:
            # Only rank 0 plots in a distributed setting.
            if not dist.is_initialized() or dist.get_rank() < 1:
                # Fetch valid elements (if sim of plots has only been calculated for valid elements)
                valid_entries = np.array(aux_frames) == np.unique(aux_frames)[0]
                png_paths = list()
                for idx_student in range(len(dict_out_students)):
                    png_paths.extend(similarities_student_teacher.visualize_similarities_student_teacher({k_: v_ for k_, v_ in plots_dict.items() if f'{idx_student}' in k_}, x_students[idx_student], x_teacher, y_teacher,
                                                                                                         prefix=f'{self.hparams.run_name}_train_b{batch_idx}_s{idx_student}_ep{str(self.current_epoch).zfill(3)}', path_plots=self.hparams.default_root_dir))
                    y_one_hot_list_ = [y_one_hot_gathered[idx_student], y_teacher_one_hot_gathered]
                    for identifier_, dict_, x_, y_one_hot_, in zip(['student', 'teacher'], [dict_out_students[idx_student], dict_out_teacher], [x_students[idx_student], x_teacher], y_one_hot_list_):
                        if dict_ is not None:
                            png_paths.extend(image_grid.plot_grid_middle(x_.detach().cpu(),
                                                                         y_one_hot_.detach().cpu(),
                                                                         torch.softmax(dict_['dense']['embedded_latents'].detach().cpu().float(), dim=1),
                                                                         None,
                                                                         indices_elements=[idx_ * 2 for idx_ in range(max(min(x_students[0].shape[0] // 2, 2), 1))],
                                                                         prefix=f'{self.hparams.run_name}_train_b{batch_idx}_s{idx_student}_ep{str(self.current_epoch).zfill(3)}_{identifier_}', path_plots=self.hparams.default_root_dir))
                if self.hparams.online_on:
                    [self.logger[1].experiment.log({str(plb.Path(png_).stem[:-6]): wandb.Image(png_)}) for png_ in png_paths]
    return {'loss': loss_dict['all']}
def training_epoch_end(self, outputs) -> None:
    """Log the epoch-mean training loss and aggregated Dice scores.

    :param outputs: list of per-step dicts returned by ``training_step`` (each holding ``'loss'``).
    """
    self.log('train_loss_mean', torch.mean(torch.tensor([x_['loss'] for x_ in outputs], dtype=torch.float)), sync_dist=True)
    for scorer_, tag_ in zip([self.score_seg_train], ['all']):  # self.score_seg_train_annotated, self.score_seg_train_non_annotated], ['all', 'annotated', 'non_annotated']):
        try:
            score_seg = scorer_.aggregate()
            scorer_.reset()
            self.log_dict({f'train_dice_{tag_}_c{str(idx_c).zfill(2)}': score_seg[idx_c] for idx_c in range(len(score_seg))}, sync_dist=True)
            # Mean over foreground classes only (index 0 is background).
            self.log(f'train_dice_{tag_}_mean', torch.mean(score_seg[1:]), sync_dist=True)
        except Exception:
            # Fix: the original bare `except:` also swallowed SystemExit / KeyboardInterrupt.
            # aggregate() can raise when the metric received no samples this epoch; skip logging then.
            pass
def validation_step(self, batch: Any, batch_idx: int, dataloader_idx: Optional[int] = None) -> Any:
    """One validation iteration: sliding-window inference per active foreground category,
    segmentation loss + Dice metric accumulation, optional plotting.

    Returns a dict with the (category-averaged) loss under ``'loss'``.
    """
    if batch_idx == 0:
        torch.cuda.empty_cache()  # helps with fragmentation
    # batch preparations
    aux_names = list(itertools.chain(*list(zip(*[batch[x_] for x_ in batch.keys() if 'name' in x_]))))  # custom collate on auxiliary keys, since monai can only concat arrays / tensors.
    aux_frames = list(itertools.chain(*list(zip(*[batch[x_] for x_ in batch.keys() if 'frame' in x_]))))
    aux_domains = list(itertools.chain(*list(zip(*[batch[x_] for x_ in batch.keys() if 'domain' in x_]))))
    aux_annotated = torch.stack(list(itertools.chain(*list(zip(*[batch[x_] for x_ in batch.keys() if 'annotated' in x_])))))  # custom collate on auxiliary keys, since monai can only concat arrays / tensors.
    x = batch['image']
    y = batch['label'].float().round().long()[:, 0, ...]
    y_one_hot = einops.rearrange(F.one_hot(y, num_classes=self.hparams.out_channels), 'b h w d c -> b c h w d')
    scribbles, scribbles_masked = None, None
    if 'scribbles' in batch.keys():
        scribbles = batch['scribbles'][:, 0, ...]
        scribbles_masked = scribbles.clone()
        scribbles_masked[~aux_annotated, ...] = self.hparams.out_channels  # Mask non-annotated
    coord_grids = None
    if 'coord_grid' in batch.keys():
        coord_grids = batch['coord_grid']
    # Activate respective instructions
    loss_dict_all = list()
    foreground_labels = self.hparams.label_indices_downstream_active if self.hparams.downstream else self.hparams.label_indices_base
    # Note: outside of the downstream setting the list is emptied, i.e. validation evaluates no categories.
    foreground_labels = foreground_labels if self.hparams.downstream else []
    for idx_category in range(len(foreground_labels)):
        self.set_active_indices_validation(batch_size=x.shape[0], foreground_category=foreground_labels[idx_category], device=y.device)
        self.set_frozen_indices(device=y.device)
        y_one_hot_gathered, y_gathered, y_one_hot_active = self.gather_labels(targets_one_hot=y_one_hot)
        # Sliding Window Inference - by teacher
        print(f'Evaluating batch {batch_idx} for category {foreground_labels[idx_category]} ({idx_category + 1}/{len(foreground_labels)}) with active indices {self.label_indices_active}.')
        volume_prediction = minferers.sliding_window_inference(inputs=x,
                                                               roi_size=self.hparams.patch_size_students[0],
                                                               sw_batch_size=self.hparams.batch_size,
                                                               predictor=partial(self.forward_prediction, label_indices=self.label_indices_active),
                                                               overlap=self.hparams.sliding_window_overlap)
        # Loss calculation
        loss_dict, plots_dict = dict(), dict()
        # Segmentation
        if any(aux_annotated):
            if not self.hparams.downstream and self.hparams.loss_weight_segmentation > 0.:
                loss_dict.update(self.loss_seg(volume_prediction[aux_annotated, ...],
                                               y_gathered[aux_annotated, ...],
                                               label_indices_active=self.label_indices_active))
            else:
                # Downstream: aggregate frozen categories into background before computing the loss.
                dense_embedded_latents_scattered = self.scatter_predictions(volume_prediction)
                dense_embedded_latents_nonfrozen = self.gather_non_frozen_predictions(dense_embedded_latents_scattered)
                y_one_hot_non_frozen_, y_non_frozen_ = self.gather_non_frozen_labels(y_one_hot)
                loss_dict.update(self.loss_seg(dense_embedded_latents_nonfrozen[aux_annotated, ...],
                                               y_non_frozen_[aux_annotated, ...],
                                               label_indices_active=self.label_indices_active))
        else:
            loss_dict['seg'] = torch.tensor(0., device=self.device, requires_grad=False)
        # Self-supervised contrastive not available in the current setup, since patched embeddings would be required
        loss_dict['all'] = sum(loss_dict.values())
        loss_dict_all.append(loss_dict)
        with torch.no_grad():
            # metrics
            dense_embedded_latents_scattered = self.scatter_predictions(volume_prediction)
            predictions_one_hot = F.one_hot(dense_embedded_latents_scattered.argmax(dim=1).long(), num_classes=self.hparams.out_channels).permute(0, 4, 1, 2, 3)
            self.score_seg_val(y_pred=predictions_one_hot,
                               y=y_one_hot_active)
            # plotting
            if self.hparams.plot and (self.current_epoch + 1) % self.hparams.plot_interval_val == 0 and batch_idx % 10 == 0 and batch_idx <= 20:
                # Only rank 0 plots in a distributed setting.
                if not dist.is_initialized() or dist.get_rank() < 1:
                    png_paths = list()
                    png_paths.extend(image_grid.plot_grid_middle(x.detach().cpu(),
                                                                 y_one_hot_gathered.detach().cpu(),
                                                                 torch.softmax(volume_prediction.detach().cpu(), dim=1),
                                                                 scribbles.detach().cpu() if scribbles is not None else None,
                                                                 indices_elements=[idx_ * 2 for idx_ in range(max(min(x.shape[0] // 2, 2), 1))],
                                                                 prefix=f'{self.hparams.run_name}_val_b{batch_idx}_s0_c{idx_category}_ep{str(self.current_epoch).zfill(3)}', path_plots=self.hparams.default_root_dir))
                    if self.hparams.online_on:
                        [self.logger[1].experiment.log({str(plb.Path(png_).stem[:-6]): wandb.Image(png_)}) for png_ in png_paths]
    if len(loss_dict_all) > 0:
        # Average each loss entry over the evaluated categories.
        loss_dict_all = {k_: sum(dict_[k_] for dict_ in loss_dict_all) / len(loss_dict_all) for k_ in loss_dict_all[0].keys()}
        with torch.no_grad():
            # logging
            self.log(f'val_epoch', self.current_epoch, sync_dist=True)  # log for ckpt selection
            for key_, value_ in loss_dict_all.items():
                self.log(f'val_loss_{key_}', value_, sync_dist=True)  # logs per epoch
    else:
        loss_dict_all = {'all': torch.tensor(0., device=self.device, requires_grad=False)}
    return {'loss': loss_dict_all['all']}
def validation_epoch_end(self, outputs) -> None:
    """Log the epoch-mean validation loss and, for downstream runs, per-class Dice scores."""
    losses = torch.tensor([step_out['loss'] for step_out in outputs], dtype=torch.float)
    self.log('val_loss_mean', losses.mean(), sync_dist=True)
    if self.hparams.downstream:
        dice_per_class = self.score_seg_val.aggregate()
        self.score_seg_val.reset()
        per_class_logs = {f'val_dice_c{str(idx_c).zfill(2)}': dice_per_class[idx_c] for idx_c in range(len(dice_per_class))}
        self.log_dict(per_class_logs, sync_dist=True)
        # Mean over foreground classes only (index 0 is background).
        self.log('val_dice_mean', torch.mean(dice_per_class[1:]), sync_dist=True)
def test_step(self, batch: Any, batch_idx: int, dataloader_idx: Optional[int] = None) -> Any:
    """One test iteration: sliding-window inference per foreground category and export of
    predictions to ``hparams.export_dir``. Loss / Dice / viewer blocks are disabled via local toggles.

    Returns a dict with the (category-averaged) loss under ``'loss'``.
    """
    if batch_idx == 0:
        torch.cuda.empty_cache()  # helps with fragmentation
    # batch preparations
    aux_names = list(itertools.chain(*list(zip(*[batch[x_] for x_ in batch.keys() if 'name' in x_]))))  # custom collate on auxiliary keys, since monai can only concat arrays / tensors.
    aux_frames = list(itertools.chain(*list(zip(*[batch[x_] for x_ in batch.keys() if 'frame' in x_]))))
    aux_domains = list(itertools.chain(*list(zip(*[batch[x_] for x_ in batch.keys() if 'domain' in x_]))))
    aux_annotated = torch.stack(list(itertools.chain(*list(zip(*[batch[x_] for x_ in batch.keys() if 'annotated' in x_])))))  # custom collate on auxiliary keys, since monai can only concat arrays / tensors.
    x = batch['image']
    y = batch['label'].float().round().long()[:, 0, ...]
    y_one_hot = einops.rearrange(F.one_hot(y, num_classes=self.hparams.out_channels), 'b h w d c -> b c h w d')
    scribbles, scribbles_masked = None, None
    if 'scribbles' in batch.keys():
        scribbles = batch['scribbles'][:, 0, ...]
        scribbles_masked = scribbles.clone()
        scribbles_masked[~aux_annotated, ...] = self.hparams.out_channels  # Mask non-annotated
    coord_grids = None
    if 'coord_grid' in batch.keys():
        coord_grids = batch['coord_grid']
    # Activate respective instructions
    loss_dict_all = list()
    foreground_labels = self.hparams.label_indices_downstream_active if self.hparams.downstream else self.hparams.label_indices_base
    for idx_category in range(len(foreground_labels)):
        torch.cuda.empty_cache()  # helps with fragmentation
        self.set_active_indices_test(batch_size=x.shape[0], foreground_category=foreground_labels[idx_category], device=y.device)
        self.set_frozen_indices(device=y.device)
        y_one_hot_gathered, y_gathered, y_one_hot_active = self.gather_labels(targets_one_hot=y_one_hot)
        # Sliding Window Inference - by teacher
        print(f'Evaluating batch {batch_idx} for category {foreground_labels[idx_category]} ({idx_category + 1}/{len(foreground_labels)}) with active indices {self.label_indices_active}.')
        volume_prediction = minferers.sliding_window_inference(inputs=x,
                                                               roi_size=self.hparams.patch_size_students[0],
                                                               sw_batch_size=self.hparams.batch_size,
                                                               predictor=partial(self.forward_prediction, label_indices=self.label_indices_active),
                                                               overlap=self.hparams.sliding_window_overlap)
        # Loss calculation
        loss_dict, plots_dict = dict(), dict()
        # Segmentation
        test_loss = False  # debug toggle: loss computation is disabled during testing
        if test_loss and any(aux_annotated):
            if not self.hparams.downstream and self.hparams.loss_weight_segmentation > 0.:
                loss_dict.update(self.loss_seg(volume_prediction[aux_annotated, ...],
                                               y_gathered[aux_annotated, ...],
                                               label_indices_active=self.label_indices_active))
            else:
                dense_embedded_latents_scattered = self.scatter_predictions(volume_prediction)
                dense_embedded_latents_nonfrozen = self.gather_non_frozen_predictions(dense_embedded_latents_scattered)
                y_one_hot_non_frozen_, y_non_frozen_ = self.gather_non_frozen_labels(y_one_hot)
                loss_dict.update(self.loss_seg(dense_embedded_latents_nonfrozen[aux_annotated, ...],
                                               y_non_frozen_[aux_annotated, ...],
                                               label_indices_active=self.label_indices_active))
        else:
            loss_dict['seg'] = torch.tensor(0., device=self.device, requires_grad=False)
        # Self-supervised contrastive not available in the current setup, since patched embeddings would be required
        loss_dict['all'] = sum(loss_dict.values())
        loss_dict_all.append(loss_dict)
        with torch.no_grad():
            # metrics
            dense_embedded_latents_scattered = self.scatter_predictions(volume_prediction)
            test_score = False  # debug toggle: Dice scoring is disabled during testing
            if test_score:
                predictions_one_hot = F.one_hot(dense_embedded_latents_scattered.argmax(dim=1).long(), num_classes=self.hparams.out_channels).permute(0, 4, 1, 2, 3)
                self.score_seg_test(y_pred=predictions_one_hot,
                                    y=y_one_hot_active)
            # save predictions
            test_viz = False  # debug toggle: interactive orthoslice viewer
            if test_viz:
                import matplotlib
                matplotlib.use('tkagg')
                argmaxed = torch.argmax(dense_embedded_latents_scattered[0, ...], dim=0)
                viewer = nib.viewers.OrthoSlicer3D(np.array((argmaxed / argmaxed.max()).detach().cpu()))
                viewer.clim = [0.0, 1.0]
                viewer.show()
            # Export predictions (inverse test transforms applied) to the export directory.
            post_transform = generate_test_post_transforms(output_dir=self.hparams.export_dir,
                                                           output_postfix=f'pred_cat{foreground_labels[idx_category]}',
                                                           transform_test=self.trainer.datamodule.transform_test,
                                                           n_classes=None)
            batch['pred'] = dense_embedded_latents_scattered
            [post_transform(x_) for x_ in decollate_batch(batch)]
            # plotting
            if self.hparams.plot and self.hparams.plot_interval_test > 0 and batch_idx % 10 == 0 and batch_idx <= 20:
                # Only rank 0 plots in a distributed setting.
                if not dist.is_initialized() or dist.get_rank() < 1:
                    png_paths = list()
                    png_paths.extend(image_grid.plot_grid_middle(x.detach().cpu(),
                                                                 y_one_hot_gathered.detach().cpu(),
                                                                 torch.softmax(volume_prediction.detach().cpu(), dim=1),
                                                                 scribbles.detach().cpu() if scribbles is not None else None,
                                                                 indices_elements=[idx_ * 2 for idx_ in range(max(min(x.shape[0] // 2, 2), 1))],
                                                                 prefix=f'{self.hparams.run_name}_test_b{batch_idx}_s0_c{idx_category}_ep{str(self.current_epoch).zfill(3)}', path_plots=self.hparams.default_root_dir))
                    if self.hparams.online_on:
                        [self.logger[1].experiment.log({str(plb.Path(png_).stem[:-6]): wandb.Image(png_)}) for png_ in png_paths]
    if len(loss_dict_all) > 0:
        # Average each loss entry over the evaluated categories.
        loss_dict_all = {k_: sum(dict_[k_] for dict_ in loss_dict_all) / len(loss_dict_all) for k_ in loss_dict_all[0].keys()}
        with torch.no_grad():
            # logging
            self.log(f'test_epoch', self.current_epoch, sync_dist=True)  # log for ckpt selection
            for key_, value_ in loss_dict_all.items():
                self.log(f'test_loss_{key_}', value_, sync_dist=True)  # logs per epoch
    else:
        loss_dict_all = {'all': torch.tensor(0., device=self.device, requires_grad=False)}
    return {'loss': loss_dict_all['all']}
def test_epoch_end(self, outputs) -> None:
    """Log the epoch-mean test loss; Dice aggregation is disabled via a local toggle."""
    losses = torch.tensor([step_out['loss'] for step_out in outputs], dtype=torch.float)
    self.log('test_loss_mean', losses.mean(), sync_dist=True)
    test_score = False  # debug toggle kept from the original workflow
    if test_score:
        dice_per_class = self.score_seg_test.aggregate()
        self.score_seg_test.reset()
        per_class_logs = {f'test_dice_c{str(idx_c).zfill(2)}': dice_per_class[idx_c] for idx_c in range(len(dice_per_class))}
        self.log_dict(per_class_logs, sync_dist=True)
        # Mean over foreground classes only (index 0 is background).
        self.log('test_dice_mean', torch.mean(dice_per_class[1:]), sync_dist=True)
def predict_step(self, batch: Any, batch_idx: int, dataloader_idx: Optional[int] = None) -> Any:
    """Not implemented; prediction export is handled within test_step instead."""
    pass
def configure_optimizers(self):
    """Set up AdamW with two parameter groups and, for downstream runs, a OneCycle lr scheduler.

    Group 0: body + adapter parameters at the base learning rate.
    Group 1: instruction parameters at their own (downstream-dependent) learning rate.
    """
    optimizer = AdamW([{'params': [param_ for name_, param_ in [*self.architecture.network_student.get_named_body_parameters(), *self.architecture.network_student.get_named_adapter_parameters()]]},
                       {'params': [param_ for name_, param_ in self.architecture.network_student.get_named_instruction_parameters()],
                        'lr': self.hparams.learning_rate_instructions if not self.hparams.downstream else self.hparams.learning_rate_instructions_downstream}],
                      lr=self.hparams.learning_rate if not self.hparams.downstream else self.hparams.learning_rate_downstream,
                      weight_decay=self.hparams.weight_decay if not self.hparams.downstream else self.hparams.weight_decay_downstream)
    if self.hparams.downstream and self.hparams.with_scheduler_downstream:
        assert self.hparams.max_epochs is not None
        print(f'Using one cycle lr scheduler for {self.hparams.max_epochs} epochs and {1} steps per epoch.')
        # max_lr is given per parameter group (body/adapter, instructions).
        scheduler = lr_scheduler.OneCycleLR(optimizer,
                                            max_lr=[self.hparams.learning_rate_downstream, self.hparams.learning_rate_instructions_downstream],
                                            total_steps=None,
                                            epochs=self.hparams.max_epochs,
                                            steps_per_epoch=1,  # The amount of scheduler.step() performed in an epoch. Probably defaults to 1 for lightning.
                                            pct_start=0.1,
                                            anneal_strategy='cos',
                                            cycle_momentum=True,
                                            base_momentum=0.85,
                                            max_momentum=0.95,
                                            div_factor=20,  # 1e-2 / 1e2 = 1e-4
                                            final_div_factor=1,  # 1e-4 / 1 = 1e-4
                                            three_phase=False,
                                            last_epoch=- 1,
                                            verbose=False)
        return [optimizer], [scheduler]
    return optimizer
def selective_freezing(self, batch_idx):
    """Enable / disable gradients for instruction and body parameters (applied on the first batch only).

    :param batch_idx: index of the current batch; the gradient flags are only (re)applied at batch 0.
    """
    if batch_idx == 0:
        grad_instructions = self.label_indices_active[0].bool() if self.hparams.freeze_inactive else ~self.label_indices_frozen.bool()  # Expects all batch entries to have the same target during downstream
        if self.hparams.separate_background:
            grad_instructions[0] = False  # 0 isn't used in this case in the pools anyway.
        print(f'\nBatch idx: {batch_idx} - Active labels: {self.label_indices_active[0].bool()}.')
        # Fix: the reported flags now match the arguments passed to set_requires_gradient below
        # (the previous message swapped the "instructions norm" and "body" entries).
        print(f'\nBatch idx: {batch_idx} - Gradients for instructions: {grad_instructions},'
              f' instructions norm: {not self.hparams.freeze_norm},'
              f' instruction bias scores: {not self.hparams.freeze_bias_scores},'
              f' body: {not self.hparams.freeze_body}.')
        self.architecture.network_student.set_requires_gradient(grad_instructions=grad_instructions,
                                                                grad_instructions_norm=not self.hparams.freeze_norm,
                                                                grad_instructions_scores=not self.hparams.freeze_bias_scores,
                                                                grad_body=not self.hparams.freeze_body)
def set_active_indices_training(self,
                                batch_size: int,
                                device: torch.device):
    """Select the active label indices for the current training batch and store them in ``self.label_indices_active``."""
    if self.hparams.downstream:
        # Downstream: background plus the fixed set of downstream target categories.
        indices_mask = torch.zeros((batch_size, self.hparams.out_channels), dtype=torch.long, device=device)
        indices_mask[:, 0] = 1  # background is always active
        for category_ in self.hparams.label_indices_downstream_active:
            indices_mask[:, category_] = 1
        self.label_indices_active = indices_mask
        return
    # Pre-training: draw a random number of active classes, shared across the batch
    # (otherwise some kind of masking would be needed in the attention), then sample that
    # many base foreground categories per batch element without replacement.
    n_active = torch.randint(low=self.hparams.label_indices_min_active, high=self.hparams.label_indices_max_active + 1, size=(1,), device=device)[0]
    sampling_weights = torch.ones((len(self.hparams.label_indices_base),), dtype=torch.float, device=device).reshape(1, -1).expand(batch_size, -1)  # [B, L]
    sampled_cols = torch.multinomial(sampling_weights, num_samples=n_active, replacement=False)
    foreground_ids = torch.tensor(self.hparams.label_indices_base, dtype=torch.long, device=device)[sampled_cols]  # [B, n_active]
    indices_mask = torch.zeros((batch_size, self.hparams.out_channels), dtype=torch.long, device=device)
    indices_mask[:, 0] = 1  # background is always active
    indices_mask.scatter_(dim=1, index=foreground_ids, value=1)  # mark the sampled foreground categories
    self.label_indices_active = indices_mask
def set_active_indices_validation(self,
                                  batch_size: int,
                                  foreground_category: int,  # categories can be drawn manually (alongside random ones)
                                  device: torch.device):
    """Activate the requested foreground category (plus random extra categories) for validation."""
    if self.hparams.downstream:
        # Downstream: background plus the fixed set of downstream target categories.
        indices_mask = torch.zeros((batch_size, self.hparams.out_channels), dtype=torch.long, device=device)
        indices_mask[:, 0] = 1  # background is always active
        for category_ in self.hparams.label_indices_downstream_active:
            indices_mask[:, category_] = 1
        self.label_indices_active = indices_mask
        return
    n_active = self.hparams.label_indices_max_active
    # Uniform weights over all foreground categories, with the manually requested one excluded from sampling.
    sampling_weights = torch.ones((self.hparams.out_channels - 1,), dtype=torch.float, device=device).reshape(1, -1).expand(batch_size, -1)
    sampling_weights[:, foreground_category - 1] = 0.
    sampled_cols = torch.tensor([[foreground_category - 1]], dtype=torch.long, device=device)
    if n_active > 1:
        sampled_cols = torch.concat([sampled_cols, torch.multinomial(sampling_weights, num_samples=n_active - 1, replacement=False)], dim=1)
    foreground_ids = torch.arange(1, self.hparams.out_channels, dtype=torch.long, device=device)[sampled_cols]
    indices_mask = torch.zeros((batch_size, self.hparams.out_channels), dtype=torch.long, device=device)
    indices_mask[:, 0] = 1  # background is always active
    for idx_ in range(n_active):  # mark foreground; relies on broadcasting of the [1, k]-shaped index
        indices_mask[torch.arange(batch_size), foreground_ids[:, idx_]] = 1
    self.label_indices_active = indices_mask
def set_active_indices_test(self,
                            batch_size: int,
                            foreground_category: int,  # categories can be drawn manually (alongside random ones)
                            device: torch.device):
    """Activate the requested foreground category (plus random extra categories) for testing."""
    if self.hparams.downstream:
        # Downstream: background plus the fixed set of downstream target categories.
        indices_mask = torch.zeros((batch_size, self.hparams.out_channels), dtype=torch.long, device=device)
        indices_mask[:, 0] = 1  # background is always active
        for category_ in self.hparams.label_indices_downstream_active:
            indices_mask[:, category_] = 1
        self.label_indices_active = indices_mask
        return
    n_active = self.hparams.label_indices_max_active
    # Uniform weights over all foreground categories, with the manually requested one excluded from sampling.
    sampling_weights = torch.ones((self.hparams.out_channels - 1,), dtype=torch.float, device=device).reshape(1, -1).expand(batch_size, -1)
    sampling_weights[:, foreground_category - 1] = 0.
    sampled_cols = torch.tensor([[foreground_category - 1]], dtype=torch.long, device=device)
    if n_active > 1:
        sampled_cols = torch.concat([sampled_cols, torch.multinomial(sampling_weights, num_samples=n_active - 1, replacement=False)], dim=1)
    foreground_ids = torch.arange(1, self.hparams.out_channels, dtype=torch.long, device=device)[sampled_cols]
    indices_mask = torch.zeros((batch_size, self.hparams.out_channels), dtype=torch.long, device=device)
    indices_mask[:, 0] = 1  # background is always active
    for idx_ in range(n_active):  # mark foreground; relies on broadcasting of the [1, k]-shaped index
        indices_mask[torch.arange(batch_size), foreground_ids[:, idx_]] = 1
    self.label_indices_active = indices_mask
def set_frozen_indices(self,
                       device: torch.device):
    """Store the frozen-category mask in ``self.label_indices_frozen``.

    Frozen categories do not impact the selection of instructions.
    """
    if self.hparams.downstream:
        frozen_mask = torch.zeros((self.hparams.out_channels,), dtype=torch.long, device=device)
        for category_ in self.hparams.label_indices_downstream_frozen:
            frozen_mask[category_] = 1
        self.label_indices_frozen = frozen_mask
    else:
        # No frozen categories during the "normal" (pre-)training routine.
        self.label_indices_frozen = torch.zeros((self.hparams.out_channels,), dtype=torch.long, device=device)
def scatter_predictions(self, preds: torch.Tensor):
    """ Scatter preds for each batch-element (with possibly different indices) to respective label indices """
    # preds: [B, n_active, ...spatial]. Each batch element's active channels are summed back into their
    # original positions of a full [B, out_channels, ...spatial] tensor; inactive channels stay zero.
    preds_scattered = torch.zeros((preds.shape[0], self.hparams.out_channels, *preds.shape[2:]), dtype=preds.dtype, device=preds.device)
    for idx_batch in range(preds.shape[0]):
        # NOTE(review): .squeeze() would yield a 0-dim tensor (breaking the rearrange) if only one index
        # were active; in practice background + at least one foreground are active - TODO confirm.
        # The '() () ()' rearrange assumes exactly three spatial dimensions.
        preds_indices = einops.rearrange(torch.nonzero(self.label_indices_active[idx_batch, :], as_tuple=False).squeeze(), 'c -> c () () ()').expand(-1, *preds.shape[2:])
        preds_scattered[idx_batch, ...].scatter_add_(dim=0, index=preds_indices, src=preds[idx_batch, ...])  # optionally: - preds[idx_batch, ...].detach().min() - min to ensure to avoid going lower than initialization (0)?
    return preds_scattered
def gather_labels(self, targets_one_hot: torch.Tensor):
    """ Gather targets for each batch-element (with possibly different indices) to respective prediction indices """
    # targets_one_hot: [B, out_channels, ...spatial]; self.label_indices_active: [B, out_channels] 0/1 mask.
    # Returns:
    #   targets_one_hot_gathered: [B, n_active, ...spatial] - active channels only, with all inactive
    #                             labels merged into the background channel (index 0).
    #   targets_gathered: argmax of the above (class index per voxel in the gathered channel layout).
    #   targets_one_hot_active: [B, out_channels, ...spatial] - original channel layout, inactive labels
    #                           moved to background and their channels zeroed (used for metrics).
    targets_one_hot_gathered = list()
    targets_one_hot_active = list()
    for idx_batch in range(targets_one_hot.shape[0]):
        # Select active labels (can vary for each batch element)
        targets_indices = einops.rearrange(torch.nonzero(self.label_indices_active[idx_batch, :], as_tuple=False).squeeze(), 'c -> c () () ()').expand(-1, *targets_one_hot.shape[2:])
        targets_element = torch.gather(targets_one_hot[idx_batch, ...], dim=0, index=targets_indices)
        # Add inactive labels to background
        targets_element_inactive = targets_one_hot[idx_batch].clone()
        targets_element_inactive[self.label_indices_active[idx_batch, :] == 1, ...] = 0  # Mask active elements
        targets_element[0, ...] += targets_element_inactive.sum(dim=0)  # Add inactive elements to background
        targets_one_hot_gathered.append(targets_element)
        # New target for metric comparison
        targets_element_active = targets_one_hot[idx_batch].clone()
        targets_element_active[0, ...] += targets_element_inactive.sum(dim=0)  # Add inactive elements to background
        targets_element_active[self.label_indices_active[idx_batch, :] == 0, ...] = 0  # Mask inactive elements
        targets_one_hot_active.append(targets_element_active)
    targets_one_hot_gathered = torch.stack(targets_one_hot_gathered, dim=0)
    targets_gathered = torch.argmax(targets_one_hot_gathered, dim=1)
    targets_one_hot_active = torch.stack(targets_one_hot_active, dim=0)
    return targets_one_hot_gathered, targets_gathered, targets_one_hot_active
def gather_non_frozen_predictions(self, preds_scattered: torch.Tensor):
    """
    Gather preds so only non-frozen & active categories are considered as foreground. Thus, each frozen category (active or not) is aggregated into background.
    Operating on preds_scattered for streamlined gather process.
    """
    n_batch = preds_scattered.shape[0]
    # Scatter inactive / frozen in background, active & non-frozen into consecutive foreground dims
    label_indices_all = torch.arange(self.hparams.out_channels, dtype=torch.long, device=preds_scattered.device)
    label_indices_frozen_ = self.label_indices_frozen.clone()
    label_indices_frozen_[0] = 0  # For predictions / targets a background is needed, so it is always considered "non-frozen" for prediction / target aggregation (regardless if its truly frozen or not)
    labels_active_and_nonfrozen_mask = torch.logical_and(self.label_indices_active.bool(), ~label_indices_frozen_.unsqueeze(dim=0).expand(n_batch, -1).bool()).long()
    predictions_non_frozen = torch.zeros((preds_scattered.shape[0],
                                          torch.count_nonzero(labels_active_and_nonfrozen_mask[0, ...]).item(),  # All batch elements should have the same amount of active elements (to keep it in a tensor)
                                          *preds_scattered.shape[2:]), dtype=preds_scattered.dtype, device=preds_scattered.device)
    for idx_batch in range(n_batch):
        # Multiplying the mask with the channel range maps every inactive / frozen channel to index 0;
        # torch.unique(..., return_inverse=True) then yields consecutive target positions (0 = background).
        label_indices_active_and_nonfrozen = labels_active_and_nonfrozen_mask[idx_batch, ...] * label_indices_all  # all inactive / frozen ones are zero
        _, inverse_indices = torch.unique(label_indices_active_and_nonfrozen, sorted=True, return_inverse=True)
        preds_indices = einops.rearrange(inverse_indices, 'c -> c () () ()').expand(-1, *preds_scattered.shape[2:])
        predictions_non_frozen[idx_batch, ...].scatter_add_(dim=0, index=preds_indices, src=preds_scattered[idx_batch, ...])
    return predictions_non_frozen
def gather_non_frozen_labels(self, targets_one_hot: torch.Tensor):
""" Gather targets so only non-frozen & active categories are considered as foreground. Thus, each frozen category (active or not) is aggregated into background """
n_batch = targets_one_hot.shape[0]
# Scatter inactive / frozen in background, active & non-frozen into consecutive foreground dims
label_indices_all = torch.arange(self.hparams.out_channels, dtype=torch.long, device=targets_one_hot.device)
label_indices_frozen_ = self.label_indices_frozen.clone()
label_indices_frozen_[0] = 0 # For predictions / targets a background is needed, so it is always considered "non-frozen" for prediction / target aggregation (regardless if its truly frozen or not)
labels_active_and_nonfrozen_mask = torch.logical_and(self.label_indices_active.bool(), ~label_indices_frozen_.unsqueeze(dim=0).expand(n_batch, -1).bool()).long()
targets_one_hot_non_frozen = torch.zeros((targets_one_hot.shape[0],
torch.count_nonzero(labels_active_and_nonfrozen_mask[0, ...]).item(), # All batch elements should have the same amount of active elements (to keep it in a tensor)
*targets_one_hot.shape[2:]), dtype=targets_one_hot.dtype, device=targets_one_hot.device)
for idx_batch in range(n_batch):
label_indices_active_and_nonfrozen = labels_active_and_nonfrozen_mask[idx_batch, ...] * label_indices_all # all inactive / frozen ones are zero
_, inverse_indices = torch.unique(label_indices_active_and_nonfrozen, sorted=True, return_inverse=True)
preds_indices = einops.rearrange(inverse_indices, 'c -> c () () ()').expand(-1, *targets_one_hot.shape[2:])
targets_one_hot_non_frozen[idx_batch, ...].scatter_add_(dim=0, index=preds_indices, src=targets_one_hot[idx_batch, ...])
targets_non_frozen = torch.argmax(targets_one_hot_non_frozen, dim=1)
return targets_one_hot_non_frozen, targets_non_frozen
def on_load_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
state_dict = checkpoint["state_dict"].copy()
model_state_dict = self.state_dict()
is_changed = False
for k in state_dict:
if k in model_state_dict:
# Adjust parameters with size mismatch
if state_dict[k].shape != model_state_dict[k].shape:
print(f"Skip loading parameter: {k}, "
f"required shape: {model_state_dict[k].shape}, "
f"loaded shape: {state_dict[k].shape}")
checkpoint["state_dict"][k] = model_state_dict[k]
is_changed = True
else:
# Remove parameters not in the actual model
warnings.warn(f"Dropping parameter: {k}")
del checkpoint["state_dict"][k]
is_changed = True
if is_changed:
checkpoint.pop("optimizer_states", None)
def on_save_checkpoint(self, checkpoint):
if self.hparams.s3_bucket and self.hparams.online_on:
print(f'\rUploading checkpoint to {self.hparams.ckpt_dir} ...')
@staticmethod
def add_model_specific_args(parent_parser):
parser = ArgumentParser(parents=[parent_parser], add_help=False)
parser.add_argument('--learning_rate', default=1e-4, type=float)
parser.add_argument('--learning_rate_downstream', default=5e-4, type=float)
parser.add_argument('--learning_rate_instructions', default=1e-3, type=float)
parser.add_argument('--learning_rate_instructions_downstream', default=5e-3, type=float)
parser.add_argument('--weight_decay', default=1e-2, type=float)
parser.add_argument('--weight_decay_downstream', default=0, type=float)
parser.add_argument('--with_scheduler_downstream', default=True, type=bool)
parser.add_argument('--sliding_window_overlap', default=0.5, type=float)
# Segmentation loss
parser.add_argument('--loss_weight_segmentation', default=1e-0, type=float)
parser.add_argument('--loss_weight_segmentation_gamma', default=4.0, type=float)
parser.add_argument('--loss_weight_segmentation_alpha_background', default=1.0, type=float)
parser.add_argument('--loss_weight_segmentation_alpha_background_downstream', default=1.0, type=float) # May use higher value than during training, to avoid fast collapse of background into foreground.
parser.add_argument('--loss_weight_segmentation_alpha_foreground', default=1.0, type=float)
# Contrastive loss
parser.add_argument('--contrastive_pairs', default=True, type=bool)
parser.add_argument('--loss_weight_sim_paired', default=0., type=float)
parser.add_argument('--loss_weight_sim_protos', default=1e-2, type=float)
parser.add_argument('--loss_weight_sim_protos_downstream', default=0, type=float)
parser.add_argument('--loss_weight_sim_closest', default=0., type=float)
parser.add_argument('--loss_weight_dissim_closest', default=0., type=float)
parser.add_argument('--k_means_iterations', default=3, type=int)
parser.add_argument('--reduction_factor', default=2., type=float)
parser.add_argument('--reduction_factor_protos', default=8., type=float)
parser.add_argument('--fwhm_student_teacher', default=128., type=float)
parser.add_argument('--fwhm_teacher_protos', default=128., type=float)
parser.add_argument('--use_weighting_protos', default=True, type=bool)
parser.add_argument('--use_weighting_teacher', default=False, type=bool)
# Instruction swap
parser.add_argument('--label_indices_min_active', default=1, type=int) # Minimal amount of active FOREGROUND categories during training
parser.add_argument('--label_indices_max_active', default=1, type=int) # Maximal amount of active FOREGROUND categories during training
parser.add_argument('--label_indices_base', default=[1], nargs='*', type=int) # Without background (which is expected to be 0)
parser.add_argument('--label_indices_downstream_active', default=[4], nargs='*', type=int) # Active foreground classes (frozen / non-frozen) in the downstream task (without background which is expected to be 0)
parser.add_argument('--label_indices_downstream_frozen', default=[], nargs='*', type=int) # Active but frozen foreground (AND background) classes (which will be aggregated into background during "downstream" training so no "old" annotations are required in the case of class extension).
parser.add_argument('--downstream', action='store_true') # Train solely categories in label_indices_downstream_active
parser.add_argument('--selective_freezing', action='store_true') # Freeze the network except for active instructions not in label_indices_downstream_frozen
parser.add_argument('--freeze_body', default=True, type=bool) # Freeze body if selective_freezing is active
parser.add_argument('--freeze_norm', default=False, type=bool) # Freeze instruction norm if selective_freezing is active
parser.add_argument('--freeze_bias_scores', default=False, type=bool) # Freeze instruction bias scores if selective_freezing is active
parser.add_argument('--freeze_inactive', default=True, type=bool) # Freeze inactive instructions regardless of whether they are frozen or not
parser.add_argument('--separate_background', default=True, type=bool) # Each foreground class has its own background (for binary prediction case)
return parser
| Python |
3D | marcdcfischer/PUNet | src/modules/blocks/instruction_pool.py | .py | 2,896 | 52 | import torch
import torch.nn as nn
from src.modules.blocks.query_encodings import LearnedNormedQuery, LearnedNormedInstruction
from typing import Tuple, Optional
import einops
class InstructionPool(nn.Module):
def __init__(self,
instruction_pool_size: int, # Atm expects an instruction pool size of 5
hidden_channels: int,
default_instructions: int,
tokens_per_instruction: int = 10,
separate_background: bool = True,
use_norm: bool = False,
elementwise_affine: bool = True):
super().__init__()
self.instruction_pool_size = instruction_pool_size
self.hidden_channels = hidden_channels
self.default_instructions = default_instructions
self.separate_background = separate_background
self.instruction_tokens = LearnedNormedInstruction(instruction_pool_size=instruction_pool_size,
tokens_per_instruction=tokens_per_instruction,
instruction_channels=hidden_channels,
use_norm=use_norm,
elementwise_affine=elementwise_affine) # Pool with size of all possible combinations
def forward(self, label_indices: Optional[torch.Tensor] = None, batch_size: Optional[int] = -1):
"""
:param label_indices: None or [B, C]
:return:
"""
if label_indices is None:
assert batch_size > 1 # If no label indices are given, a batch size is required.
label_indices = torch.ones((batch_size, self.default_instructions), dtype=torch.long) # [B, I_def]
# Map label indices to corresponding instructions (atm combinations are hardcoded)
instruction_tokens = self.instruction_tokens() # [I, N, C]
instructions = list()
for idx_batch in range(label_indices.shape[0]):
if self.separate_background:
instruction_indices_true = torch.nonzero(label_indices[idx_batch, 1:], as_tuple=False).squeeze(dim=1) + 1 # All indices with designated background instruction ignored
else:
instruction_indices_true = torch.nonzero(label_indices[idx_batch, :], as_tuple=False).squeeze(dim=1) # All indices including shared background
selected_tokens = instruction_tokens[instruction_indices_true, ...] # [I_active, N, C]
instructions.append(einops.rearrange(selected_tokens, 'i n c -> (i n) c'))
# Note: some instruction_tokens are not reachable (due to expected label ordering) and could be omitted.
instructions = torch.stack(instructions, dim=0) if len(instructions) > 0 else None # [B, (I N), C]
return instructions
| Python |
3D | marcdcfischer/PUNet | src/modules/blocks/sia_block_deep.py | .py | 13,630 | 222 | import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Sequence, Tuple, Optional, List
from src.modules.blocks.attentive_mechanism_masked import WindowedMaskedAttentionBlock
from src.modules.blocks.query_encodings import DeepInstructedAttentionPositionScores
from einops import rearrange
import math
from itertools import chain
# follows original implementation of swin attention (wmsa, swmsa) loosely:
# https://github.com/microsoft/Swin-Transformer/blob/main/models/swin_transformer.py, https://github.com/ahatamiz/MONAI/blob/swin_unetr_v1/monai/networks/nets/swin_unetr.py
class DeepShiftedInstructedAttentionBlock(nn.Module):
"""
Block consisting of two attention blocks (windowed and shifted & windowed)
"""
def __init__(self,
hidden_channels: int,
instruction_pool_size: int,
tokens_per_instruction: int,
separate_background: bool = True,
window_size: Sequence[int] = (8, 8, 1),
shift_size: Optional[Sequence[int]] = None,
heads: int = 4,
unique_instruction_bias: bool = True,
unique_token_bias: bool = True,
no_bias_instructions: bool = False,
no_bias_content: bool = False,
adapter: bool = False):
super().__init__()
self.hidden_channels = hidden_channels
self.instruction_pool_size = instruction_pool_size
self.unique_instruction_bias = unique_instruction_bias
self.unique_token_bias = unique_token_bias
self.window_size = window_size
self.shift_size = shift_size if shift_size is not None else tuple(x_ // 2 for x_ in window_size)
self.heads = heads
self.msa_blocks = nn.ModuleList()
self.attention_scores = nn.ModuleList()
for idx_block in range(2):
self.msa_blocks.append(WindowedMaskedAttentionBlock(hidden_channels=self.hidden_channels,
heads=self.heads,
adapter=adapter))
self.attention_scores.append(DeepInstructedAttentionPositionScores(embedding_dim=32, # atm hardcoded
heads=self.heads,
instruction_pool_size=self.instruction_pool_size,
tokens_per_instruction=tokens_per_instruction,
separate_background=separate_background,
max_absolute_positions=self.window_size,
max_capped_distances=self.window_size,
unique_instruction_bias=self.unique_instruction_bias,
unique_token_bias=self.unique_token_bias,
no_bias_instructions=no_bias_instructions,
no_bias_content=no_bias_content))
def get_window_size(self, x_dim):
window_size_ = list(self.window_size)
shift_size_ = list(self.shift_size)
for idx_dim in range(len(x_dim)):
# Shrink window if whole content is smaller
if x_dim[idx_dim] <= window_size_[idx_dim]:
window_size_[idx_dim] = x_dim[idx_dim]
shift_size_[idx_dim] = 0
return tuple(window_size_), tuple(shift_size_)
def get_attn_mask(self,
x_shape: Tuple[int, int, int],
window_size: Tuple[int, int, int],
shift_size: Tuple[int, int, int],
paddings: Tuple[int, ...],
device: Optional[torch.device] = None):
with torch.no_grad():
# calculate attention mask for SW-MSA
image_mask = torch.zeros(x_shape, dtype=torch.float, device=device) # [H, W, D]
h_slices = (slice(0, -window_size[0]),
slice(-window_size[0], -shift_size[0]),
slice(-shift_size[0], None))
w_slices = (slice(0, -window_size[1]),
slice(-window_size[1], -shift_size[1]),
slice(-shift_size[1], None))
d_slices = (slice(0, -window_size[2]),
slice(-window_size[2], -shift_size[2]),
slice(-shift_size[2], None))
# Encode each region by an int
cnt = 0
for h in h_slices:
for w in w_slices:
for d in d_slices:
image_mask[h, w, d] = cnt
cnt += 1
# Encode non-padded regions differently, so paddings can't interact with true content
if any([x_ > 0 for x_ in paddings]):
image_mask[paddings[0]: x_shape[0] - paddings[1],
paddings[2]: x_shape[1] - paddings[3],
paddings[4]: x_shape[2] - paddings[5]] = 100
mask_windows = rearrange(self.window_partitioning(rearrange(image_mask, 'h w d -> () () h w d'), window_size).squeeze(dim=2), 'b p h w d -> b p (h w d)') # [1 (B), P, (H' W' D')]
attn_mask = rearrange(mask_windows, 'b p n -> b p () n') - rearrange(mask_windows, 'b p n -> b p n ()') # [1 (B), P, (H' W' D'), (H' W' D')]
attn_mask = (~(attn_mask == 0)).float() # Multiplicative mask with zeros for regions with different int encoding
return attn_mask
@staticmethod
def window_partitioning(x: torch.Tensor, window_size: Tuple[int, int, int]):
"""
:param x: [B, C, H, W, D]. Expects image content (not nodes [B N C])
:return: [B, P, C, H', W', D']
"""
x = rearrange(x, 'b c (h p1) (w p2) (d p3) -> b (p1 p2 p3) c h w d', h=window_size[0], w=window_size[1], d=window_size[2])
return x
@staticmethod
def window_recombination(x: torch.Tensor, window_size: Tuple[int, int, int], x_shape: Tuple[int, int, int]):
"""
:param x: [B, P, C, H', W', D']. Expects image content (not nodes [B N C])
:return: [B, C, H, W, D]
"""
x = rearrange(x, 'b (p1 p2 p3) c h w d -> b c (h p1) (w p2) (d p3)', p1=x_shape[0] // window_size[0], p2=x_shape[1] // window_size[1], p3=x_shape[2] // window_size[2])
return x
def forward(self, x: torch.Tensor, x_instructions: Optional[List[torch.Tensor]] = None, label_indices: Optional[torch.Tensor] = None):
"""
:param x: [B, C, H, W, D]. Expects image content (not nodes [B N C])
:param x_instructions: [B, I, C]
:return: [B, C, H, W, D], [B, I, C]
"""
b, c, h, w, d = x.shape
n_instructions = x_instructions[0].shape[1] if x_instructions is not None else 0
window_size_, shift_size_ = self.get_window_size((h, w, d))
# Pad content if necessary
paddings = (0, 0, 0, 0, 0, 0)
if any([h % window_size_[0] != 0, w % window_size_[1], d % window_size_[2]]):
paddings = [math.floor((window_size_[0] - h % window_size_[0]) / 2), math.ceil((window_size_[0] - h % window_size_[0]) / 2),
math.floor((window_size_[1] - w % window_size_[1]) / 2), math.ceil((window_size_[1] - w % window_size_[1]) / 2),
math.floor((window_size_[2] - d % window_size_[2]) / 2), math.ceil((window_size_[2] - d % window_size_[2]) / 2)]
paddings[-1] = 0 if window_size_[2] == 1 else paddings[-1] # don't pad depth singleton dim.
x = F.pad(x, tuple(reversed(paddings))) # F.pad needs reverse order (starting from last)
h_padded, w_padded, d_padded = x.shape[2:]
# Calc mask attention for cyclically shifted content
with torch.no_grad():
attn_mask_ = self.get_attn_mask(x_shape=(h_padded, w_padded, d_padded), window_size=window_size_, shift_size=shift_size_, paddings=paddings, device=x.device) # [1 (B), P, (H W D), (H W D)]. For cyclically shifted elements in shifted attention
attn_mask_all = torch.zeros((attn_mask_.shape[0], attn_mask_.shape[1], n_instructions + attn_mask_.shape[2], n_instructions + attn_mask_.shape[3]), dtype=torch.float, device=x.device) # [1 (B), P, I + (H W D), I + (H W D)]. Instructions are never masked.
attn_mask_all[:, :, n_instructions:, n_instructions:] = attn_mask_ # mask for content -> content interaction
if x_instructions is not None:
# Enable instruction -> content interaction if instructions are given.
attn_mask_all[:, :, n_instructions:, :n_instructions] = 1. # mask for instruction -> content interaction. (lazy way of excluding inst -> inst, content -> inst)
attn_mask_all = rearrange(attn_mask_all, 'b p n m -> b p () n m') # [1 (B), P, 1 (H), N, N]
# WMSA
# Calc position-based scores (including those for instructions, content and cross-attention)
pos_scores = self.attention_scores[0](dim_q=n_instructions + math.prod(window_size_), dim_k=n_instructions + math.prod(window_size_), dim_i=n_instructions,
dim_h=window_size_[0], dim_w=window_size_[1], dim_d=window_size_[2], label_indices=label_indices, device=x.device) # [1 (B), H, I + (H W D), I + (H W D)]
pos_scores = rearrange(pos_scores, 'b h n m -> b () h n m') # [B, 1 (P), H, N, N]
x_windowed = self.window_partitioning(x, window_size_) # [B', P, C, H', W', D']
if x_instructions is not None:
x_all = torch.cat([rearrange(x_instructions[0], 'b i c -> b () i c').expand(-1, x_windowed.shape[1], -1, -1),
rearrange(x_windowed, 'b p c h w d -> b p (h w d) c')], dim=2) # Concat instructions
else:
x_all = rearrange(x_windowed, 'b p c h w d -> b p (h w d) c')
x_all = self.msa_blocks[0](x_all, pos_scores=pos_scores)
# x_instructions = x_all[:, :, :n_instructions, :].mean(dim=1) if x_instructions is not None else x_instructions # For instruction take average of all windows
x_windowed = rearrange(x_all[:, :, n_instructions:, :], 'b p (h w d) c -> b p c h w d', h=window_size_[0], w=window_size_[1], d=window_size_[2])
x = self.window_recombination(x_windowed, window_size_, x_shape=(h_padded, w_padded, d_padded))
# SWMSA
# Calc position-based scores (including those for instructions, content and cross-attention)
pos_scores = self.attention_scores[1](dim_q=n_instructions + math.prod(window_size_), dim_k=n_instructions + math.prod(window_size_), dim_i=n_instructions,
dim_h=window_size_[0], dim_w=window_size_[1], dim_d=window_size_[2], label_indices=label_indices, device=x.device) # [1 (B), H, I + (H W D), I + (H W D)]
pos_scores = rearrange(pos_scores, 'b h n m -> b () h n m') # [B, 1 (P), H, N, N]
x = torch.roll(x, shifts=(-shift_size_[0], -shift_size_[1], -shift_size_[2]), dims=(2, 3, 4)) if any([x_ > 0 for x_ in shift_size_]) else x # shifted backwards
x_windowed = self.window_partitioning(x, window_size_) # [B', P, C, H', W', D']
if x_instructions is not None:
x_all = torch.cat([rearrange(x_instructions[1], 'b i c -> b () i c').expand(-1, x_windowed.shape[1], -1, -1),
rearrange(x_windowed, 'b p c h w d -> b p (h w d) c')], dim=2) # Concat instructions
else:
x_all = rearrange(x_windowed, 'b p c h w d -> b p (h w d) c')
x_all = self.msa_blocks[1](x_all, mask=attn_mask_all, pos_scores=pos_scores)
# x_instructions = x_all[:, :, :n_instructions, :].mean(dim=1) if x_instructions is not None else x_instructions # For instruction take average of all windows
x_windowed = rearrange(x_all[:, :, n_instructions:, :], 'b p (h w d) c -> b p c h w d', h=window_size_[0], w=window_size_[1], d=window_size_[2])
x = self.window_recombination(x_windowed, window_size_, x_shape=(h_padded, w_padded, d_padded))
x = torch.roll(x, shifts=shift_size_, dims=(2, 3, 4)) if any([x_ > 0 for x_ in shift_size_]) else x # shifted forwards (reverse)
# Crop padded content if necessary
if any([x_ > 0 for x_ in paddings]):
x = x[...,
paddings[0]: x.shape[2] - paddings[1],
paddings[2]: x.shape[3] - paddings[3],
paddings[4]: x.shape[4] - paddings[5]] # F.pad needs reverse order (starting from last)
return x
def named_parameters_attention(self):
return list(chain(*[x_.named_parameters_body() for x_ in self.msa_blocks]))
def named_parameters_adapter(self):
return list(chain(*[x_.named_parameters_adapter() for x_ in self.msa_blocks]))
def named_parameters_bias_content(self):
return list(chain(*[x_.named_parameters_bias_content() for x_ in self.attention_scores]))
def named_parameters_bias_instructions(self):
return list(chain(*[x_.named_parameters_bias_instructions() for x_ in self.attention_scores]))
| Python |
3D | marcdcfischer/PUNet | src/modules/blocks/sia_res_block_deep.py | .py | 4,634 | 100 | import torch
import torch.nn as nn
import torch.nn.functional as F
from src.modules.blocks.sia_block_deep import DeepShiftedInstructedAttentionBlock
from typing import Sequence, Optional, List
from monai.networks.blocks import Convolution
from monai.networks.layers.utils import get_act_layer, get_norm_layer
from itertools import chain
class DeepSIAResBlock(nn.Module):
def __init__(self,
channels_in: int,
channels_out: int,
instruction_pool_size: int,
tokens_per_instruction: int,
separate_background: bool = True,
kernel_size: Sequence[int] = (3, 3, 1),
strides: Sequence[int] = (2, 2, 1),
heads: int = 4,
window_size: Sequence[int] = (8, 8, 1),
act: str = "leakyrelu",
norm: str = "batch",
unique_instruction_bias: bool = True,
unique_token_bias: bool = True,
no_bias_instructions: bool = False,
no_bias_content: bool = False,
adapter: bool = False):
super().__init__()
self.channels_in = channels_in
self.channels_out = channels_out
self.act = get_act_layer(name=act)
self.norm = get_norm_layer(name=norm, spatial_dims=3, channels=self.channels_in)
self.conv = Convolution(spatial_dims=3,
in_channels=self.channels_in,
out_channels=self.channels_out,
kernel_size=kernel_size,
strides=strides,
act="leakyrelu",
norm="batch",
is_transposed=False,
conv_only=True)
self.sia = DeepShiftedInstructedAttentionBlock(hidden_channels=self.channels_out,
instruction_pool_size=instruction_pool_size,
tokens_per_instruction=tokens_per_instruction,
separate_background=separate_background,
heads=heads,
window_size=window_size,
unique_instruction_bias=unique_instruction_bias,
unique_token_bias=unique_token_bias,
no_bias_instructions=no_bias_instructions,
no_bias_content=no_bias_content,
adapter=adapter)
self.norm_res = get_norm_layer(name=norm, spatial_dims=3, channels=self.channels_in)
self.conv_res = Convolution(spatial_dims=3,
in_channels=self.channels_in,
out_channels=self.channels_out,
kernel_size=(1, 1, 1),
strides=strides,
act="leakyrelu",
norm="batch",
is_transposed=False,
conv_only=True)
def forward(self, x: torch.Tensor, x_instructions: Optional[List[torch.Tensor]] = None, label_indices: Optional[torch.Tensor] = None):
residual = x
# Downsampling
x = self.conv(self.act(self.norm(x)))
# SIA
x = self.sia(x, x_instructions, label_indices=label_indices)
# Residuals
x = x + self.conv_res(self.norm_res(residual))
return x
def named_parameters_body(self):
parameters_down = list(chain(*[self.conv.named_parameters(), self.norm.named_parameters()]))
parameters_res = list(chain(*[self.conv_res.named_parameters(), self.norm_res.named_parameters()]))
parameters_sia_att = self.sia.named_parameters_attention()
parameters_bias_content = self.sia.named_parameters_bias_content()
return list(chain(*[parameters_down, parameters_res, parameters_sia_att, parameters_bias_content]))
def named_parameters_adapter(self):
return self.sia.named_parameters_adapter()
def named_parameters_bias_instructions(self):
parameters_bias_instructions = list(self.sia.named_parameters_bias_instructions())
return parameters_bias_instructions
| Python |
3D | marcdcfischer/PUNet | src/modules/blocks/attentive_mechanism_masked.py | .py | 2,819 | 74 | import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Optional, Tuple
from src.modules.blocks.attention import WindowedMaskedAttention
from torch.utils.checkpoint import checkpoint
# see https://github.com/KMnP/vpt/blob/e2dd70a5ee291d398d002e6963ddbe0f66f58038/src/models/vit_adapter/adapter_block.py#L25 for adapter adaptation
class WindowedMaskedAttentionBlock(nn.Module):
"""
Generic attention block for masked attention
"""
def __init__(self,
hidden_channels: int,
heads: int = 4,
reduction_factor: int = 4, # atm hardcoded
adapter: bool = False,
use_checkpoint: bool = False):
super().__init__()
self.adapter = adapter
self.use_checkpoint = use_checkpoint
self.attention_norm = nn.LayerNorm(hidden_channels)
self.attention = WindowedMaskedAttention(q_channels=hidden_channels,
heads=heads,
separate_norms=False)
self.mlp_norm = nn.LayerNorm(hidden_channels)
self.mlp = nn.Linear(hidden_channels, hidden_channels)
if self.adapter:
self.linear_down = nn.Linear(hidden_channels, hidden_channels // reduction_factor)
self.linear_up = nn.Linear(hidden_channels // reduction_factor, hidden_channels)
def forward(self, x: torch.Tensor, mask: Optional[torch.Tensor] = None, pos_scores: Optional[torch.Tensor] = None):
"""
:param x: Key / value content [B, P, N, C]
:return:
"""
# Attention block
x_att = self.attention_norm(x)
if self.use_checkpoint:
x_att = checkpoint(self.attention, x_att, x_att, x_att, mask, None, None, pos_scores, preserve_rng_state=False, use_reentrant=False)
else:
x_att = self.attention(q=x_att, k=x_att, v=x_att, mask=mask, pos_scores=pos_scores)
x = x + x_att
# Residual and MLP
x_mlp = self.mlp_norm(x) # For next iteration add a small MLP with at least on activation
x_mlp = self.mlp(x_mlp)
x = x + x_mlp
# Adapter - with pre- and post- residual connection
if self.adapter:
x = x + self.linear_up(F.leaky_relu(self.linear_down(x)))
return x
def named_parameters_body(self):
return [*list(self.attention_norm.named_parameters()), *list(self.attention.named_parameters()),
*list(self.mlp_norm.named_parameters()), *list(self.mlp.named_parameters())]
def named_parameters_adapter(self):
params_ = list()
if self.adapter:
params_ += [*self.linear_down.named_parameters(), *self.linear_up.named_parameters()]
return params_
| Python |
3D | marcdcfischer/PUNet | src/modules/blocks/mlp.py | .py | 542 | 20 | import torch
from torch import nn
class MLP(nn.Module):
def __init__(self,
in_channels: int,
widening_factor: int = 2): # Perceiver used 4
super().__init__()
self.linear_1 = nn.Linear(in_channels, in_channels * widening_factor)
self.act_1 = nn.LeakyReLU()
self.linear_2 = nn.Linear(in_channels * widening_factor, in_channels)
def forward(self, x: torch.Tensor):
x = self.linear_1(x)
x = self.act_1(x)
x = self.linear_2(x)
return x
| Python |
3D | marcdcfischer/PUNet | src/modules/blocks/similarity_aggregation.py | .py | 1,732 | 31 | import torch
import torch.nn.functional as F
def similarity_aggregation(latents, instructions, mean_aggregation: bool = False, top_k_selection: bool = False, soft_selection_sigma: float = 0.1):
"""
:param latents: [B, H*W*D, C]
:param instructions: [B, I, N, C]
:return: [B, I, H*W*D]
"""
# Note: Segmenter (https://github.com/rstrudel/segmenter) effectively does a linear layer with weights separate for content and class tokens before doing the comparison (but without changing the dimension)
# x_inst = self.linear_projection(dict_out['instructions']['segmentation_latents'])
x_sim = torch.einsum('b m c, b i n c -> b i n m',
F.normalize(latents, p=2, dim=-1),
F.normalize(instructions, p=2, dim=-1)) + 1. / 2. # Calculate similarities in range [0, 1] between instructions and content
# (Post) selection of instructions
assert mean_aggregation is False or top_k_selection is False # Both can't be true at once
if mean_aggregation:
x_sim = torch.mean(x_sim, dim=2)
elif top_k_selection:
# Top k selection with k=3. 1. Doesn't have to align to all, 2. Single outlier (max) is prevented due to top k averaging.
x_sim = torch.topk(x_sim, k=3, dim=2)[0]
x_sim = torch.mean(x_sim, dim=2) # Average similarities of top k tokens of the respective mask
else:
# Re-weight by relative importance (detached softmaxed similarities). 1. Doesn't have to align to all, 2. All instructions receive a (weighted) gradient.
x_sim = torch.softmax(x_sim.detach() / soft_selection_sigma, dim=2) * x_sim
x_sim = torch.sum(x_sim, dim=2) # Aggregate weighted similarities
return x_sim
| Python |
3D | marcdcfischer/PUNet | src/modules/blocks/__init__.py | .py | 0 | 0 | null | Python |
3D | marcdcfischer/PUNet | src/modules/blocks/query_encodings.py | .py | 31,761 | 450 | import torch
import torch.nn as nn
import einops
import math
from typing import Tuple, Optional, Sequence
import warnings
# see e.g. https://github.com/lucidrains/x-transformers/blob/55ca5d96c8b850b064177091f7a1dcfe784b24ce/x_transformers/x_transformers.py#L116
# Note: Perceiver uses slightly different formulation based on range from -1 to 1.
# Explanation - https://datascience.stackexchange.com/questions/51065/what-is-the-positional-encoding-in-the-transformer-model
class FourierPositionalEncoding(nn.Module):
""" Generate "classical" fourier (sinusoidal) pos encoding """
def __init__(self,
pos_channels: int = 64,
dims: int = 1,
base: float = 10000.):
super().__init__()
self.pos_channels = pos_channels
self.dims = dims
self.pos_channels_per_dim = self.pos_channels // self.dims
inv_freq = torch.exp(- torch.arange(0., self.pos_channels_per_dim, 2) / self.pos_channels_per_dim * math.log(base)) # base**(-2k/ch) = 1 / base**(2k/ch), k = 0, 2*1, ..., pos_channels_per_dim - 2
self.register_buffer('inv_freq', inv_freq)
def forward(self, x: torch.Tensor, dims: Tuple[int] = (2, 3, 4), offset: Tuple[int] = (0, 0, 0)):
emb_last = None
for idx_dim, dim_ in enumerate(dims):
pos_dim_ = torch.arange(0., x.shape[dim_], device=x.device) + offset[idx_dim]
sinusoid_inp_ = torch.einsum('i,j->ij', pos_dim_, self.inv_freq)
emb_ = torch.cat([sinusoid_inp_.sin(), sinusoid_inp_.cos()], dim=-1)
if idx_dim > 0:
emb_last = einops.repeat(emb_last, 'm i -> m n i', n=emb_.shape[0])
emb_ = einops.repeat(emb_, 'n j -> m n j', m=emb_last.shape[0])
emb_ = einops.rearrange(torch.concat([emb_last, emb_], dim=-1), 'm n k -> (m n) k')
emb_last = emb_
# Fill remaining dims with zeros
if emb_last.shape[1] < self.pos_channels:
emb_last = torch.cat([emb_last, torch.zeros((emb_last.shape[0], self.pos_channels - emb_last.shape[1]), device=emb_last.device)], dim=-1)
return einops.rearrange(emb_last, 'n d -> () n d')
class SparseFourierPositionalEncoding(nn.Module):
""" Generate "classical" fourier (sinusoidal) pos encoding """
def __init__(self,
pos_channels: int = 64,
dims: int = 1,
base: float = 10000.):
super().__init__()
self.pos_channels = pos_channels
self.dims = dims
self.pos_channels_per_dim = self.pos_channels // self.dims
inv_freq = torch.exp(- torch.arange(0., self.pos_channels_per_dim, 2) / self.pos_channels_per_dim * math.log(base)) # base**(-2k/ch) = 1 / base**(2k/ch), k = 0, 2*1, ..., pos_channels_per_dim - 2
self.register_buffer('inv_freq', inv_freq)
def forward(self, q: torch.Tensor, x_shape: Tuple[int], offset: Tuple[int] = (0, 0, 0)):
"""
:param q: [B N 3]. Positions from -1 to 1 across each dimension
:param x_shape: Shape of x. (h, w, d)
:param offset:
:return:
"""
emb_last = None
for idx_dim in range(q.shape[-1]):
pos_dim_ = (q[..., idx_dim] + 1. / 2.) * (x_shape[idx_dim] - 1.) + offset[idx_dim]
sinusoid_inp_ = torch.einsum('bi,j->bij', pos_dim_, self.inv_freq)
emb_ = torch.cat([sinusoid_inp_.sin(), sinusoid_inp_.cos()], dim=-1)
if idx_dim > 0:
emb_ = torch.concat([emb_last, emb_], dim=-1)
emb_last = emb_
# Fill remaining dims with zeros
if emb_last.shape[2] < self.pos_channels:
emb_last = torch.cat([emb_last, torch.zeros((emb_last.shape[0], emb_last.shape[1], self.pos_channels - emb_last.shape[2]), device=emb_last.device)], dim=-1)
return emb_last
class LearnedQuery(nn.Module):
def __init__(self,
n_queries: int,
query_channels: int = 512,
requires_grad: bool = True):
super().__init__()
self.query = nn.Parameter(nn.init.xavier_uniform_(torch.empty((n_queries, query_channels)),
gain=nn.init.calculate_gain('linear')), requires_grad=requires_grad) # [N, C]. Perceiver used truncated normal
def forward(self, batch_size: int):
return einops.repeat(self.query, 'n c -> b n c', b=batch_size)
class LearnedNormedQuery(nn.Module):
    """Learned query tokens with a LayerNorm applied before the batch broadcast."""

    def __init__(self,
                 n_queries: int,
                 query_channels: int = 512,
                 requires_grad: bool = True):
        super().__init__()
        # [N, C] parameter, Xavier-initialized in place (Perceiver used a truncated normal instead).
        weight = nn.init.xavier_uniform_(torch.empty((n_queries, query_channels)),
                                         gain=nn.init.calculate_gain('linear'))
        self.query = nn.Parameter(weight, requires_grad=requires_grad)
        self.query_norm = nn.LayerNorm(query_channels, elementwise_affine=True)

    def forward(self, batch_size: int):
        normed = self.query_norm(self.query)  # [N, C]
        return normed.unsqueeze(0).expand(batch_size, -1, -1)  # [B, N, C]
class LearnedNormedInstruction(nn.Module):
    """List of learned instruction embeddings [tokens, channels], optionally jointly layer-normed."""

    def __init__(self,
                 instruction_pool_size: int,
                 tokens_per_instruction: int = 10,
                 instruction_channels: int = 512,
                 requires_grad: bool = True,
                 use_norm: bool = False,
                 elementwise_affine: bool = True):
        super().__init__()
        self.instruction_pool_size = instruction_pool_size
        self.instructions = nn.ParameterList()
        for _ in range(instruction_pool_size):
            weight = nn.init.xavier_uniform_(torch.empty((tokens_per_instruction, instruction_channels)),
                                             gain=nn.init.calculate_gain('linear'))  # [N, C]
            self.instructions.append(nn.Parameter(weight, requires_grad=requires_grad))
        # One norm shared jointly by all instructions (if enabled).
        self.instructions_norm = nn.LayerNorm(instruction_channels, elementwise_affine=elementwise_affine) if use_norm else None

    def forward(self):
        stacked = torch.stack(tuple(self.instructions), dim=0)  # [I, N, C]
        if self.instructions_norm is None:
            return stacked
        return self.instructions_norm(stacked)
class LearnedNormedPseudoInstruction(nn.Module):
    """Per-subject, per-label learned instruction embeddings with a joint LayerNorm."""

    def __init__(self,
                 instruction_pool_size_subjects: int,
                 instruction_pool_size_labels: int,
                 tokens_per_instruction: int = 10,
                 instruction_channels: int = 512,
                 requires_grad: bool = True,
                 elementwise_affine: bool = True):
        super().__init__()
        self.instruction_pool_size_subjects = instruction_pool_size_subjects
        self.instruction_pool_size_labels = instruction_pool_size_labels
        self.instructions = nn.ParameterList()
        for _ in range(instruction_pool_size_subjects):
            # One parameter block per subject: [labels, tokens, channels].
            weight = nn.init.xavier_uniform_(torch.empty((instruction_pool_size_labels, tokens_per_instruction, instruction_channels)),
                                             gain=nn.init.calculate_gain('linear'))
            self.instructions.append(nn.Parameter(weight, requires_grad=requires_grad))
        # Joint norm shared by all instructions.
        self.instructions_norm = nn.LayerNorm(instruction_channels, elementwise_affine=elementwise_affine)

    def forward(self, idx_subject, idx_label):
        # Index only the requested subject / label so gradients flow to the selected parameters only.
        selected = self.instructions[idx_subject][idx_label, ...]  # [N, C]
        return self.instructions_norm(selected)
# See https://github.com/lucidrains/x-transformers/blob/55ca5d96c8b850b064177091f7a1dcfe784b24ce/x_transformers/x_transformers.py#L116 for potential implementations
# https://github.com/epfml/attention-cnn/blob/master/models/bert.py
# https://github.com/TensorUI/relative-position-pytorch/blob/master/relative_position.py
class InstructedAttentionPositionScores(nn.Module):
    """
    A combination of neural interpreter learned position, alibi (https://arxiv.org/pdf/2108.12409.pdf), relative position representations (https://arxiv.org/pdf/1803.02155.pdf, https://arxiv.org/pdf/1906.05909.pdf) and other relative position schemes.
    A (computational) efficient form for an attention score: q^T * k + w_h^T * emb[diff] with a learned embedding for each diff and a learned weight vector for each category and attention head.

    The joint token sequence is assumed to be laid out as [instructions (dim_i) | content (dim_h * dim_w * dim_d)],
    yielding the additive score matrix
        A | B
        - - -
        C | D
    A: instruction-instruction, B: instruction-content, C: content-instruction, D: content-content.
    """
    def __init__(self,
                 embedding_dim: int = 64,
                 heads: int = 4,
                 tokens_per_instruction: int = 10,
                 max_absolute_positions: Sequence[int] = (64, 64, 1),  # Max absolute positions index
                 max_capped_distances: Sequence[int] = (64, 64, 1),  # Max capped relative distances
                 unique_token_bias: bool = True,  # Separate bias per instruction token (vs. one shared bias)
                 no_bias_instructions: bool = False,  # Disables weights for instructions and cross biases.
                 no_bias_content: bool = False):  # Disables content weights
        super().__init__()
        self.heads = heads
        self.tokens_per_instruction = tokens_per_instruction
        self.unique_token_bias = unique_token_bias
        self.max_token_positions = tokens_per_instruction if unique_token_bias else 1
        self.max_absolute_positions = max_absolute_positions
        self.max_capped_distances = max_capped_distances
        self.no_bias_instructions = no_bias_instructions
        self.no_bias_content = no_bias_content
        self.inv_temperature = embedding_dim ** -0.5
        # Learned encoding
        self.encoding_intra_instructions = nn.Parameter(nn.init.xavier_uniform_(torch.empty((self.max_token_positions, self.max_token_positions, embedding_dim)),
                                                                                gain=nn.init.calculate_gain('linear')), requires_grad=True)  # Encoding for instructions of (intra) connections same category
        self.encoding_inter_instructions = nn.Parameter(nn.init.xavier_uniform_(torch.empty((self.max_token_positions, self.max_token_positions, embedding_dim)),
                                                                                gain=nn.init.calculate_gain('linear')), requires_grad=True)  # Encoding for instructions of (inter) connections of different categories
        self.encoding_cross_inst_content = nn.Parameter(nn.init.xavier_uniform_(torch.empty((self.max_token_positions, 1, embedding_dim)),
                                                                                gain=nn.init.calculate_gain('linear')), requires_grad=True)  # Instruction -> content bias (shared across content positions)
        self.encoding_cross_content_inst = nn.Parameter(nn.init.xavier_uniform_(torch.empty((1, self.max_token_positions, embedding_dim)),
                                                                                gain=nn.init.calculate_gain('linear')), requires_grad=True)  # Content -> instruction bias (shared across content positions)
        self.encoding_content_h = nn.Parameter(nn.init.xavier_uniform_(torch.empty((2 * self.max_capped_distances[0] - 1, embedding_dim)),
                                                                       gain=nn.init.calculate_gain('linear')), requires_grad=True)
        self.encoding_content_w = nn.Parameter(nn.init.xavier_uniform_(torch.empty((2 * self.max_capped_distances[1] - 1, embedding_dim)),
                                                                       gain=nn.init.calculate_gain('linear')), requires_grad=True)
        self.encoding_content_d = nn.Parameter(nn.init.xavier_uniform_(torch.empty((2 * self.max_capped_distances[2] - 1, embedding_dim)),
                                                                       gain=nn.init.calculate_gain('linear')), requires_grad=True)
        # To be encountered relative distances - precomputed, clamped index maps into the content encodings
        relative_distances_h = torch.arange(self.max_absolute_positions[0], dtype=torch.long).reshape(1, -1) - torch.arange(self.max_absolute_positions[0], dtype=torch.long).reshape(-1, 1)
        relative_distances_h = torch.clamp(relative_distances_h + self.max_capped_distances[0] - 1, min=0, max=(self.max_capped_distances[0] - 1) * 2)
        self.register_buffer('relative_distances_h', relative_distances_h)
        relative_distances_w = torch.arange(self.max_absolute_positions[1], dtype=torch.long).reshape(1, -1) - torch.arange(self.max_absolute_positions[1], dtype=torch.long).reshape(-1, 1)
        relative_distances_w = torch.clamp(relative_distances_w + self.max_capped_distances[1] - 1, min=0, max=(self.max_capped_distances[1] - 1) * 2)
        self.register_buffer('relative_distances_w', relative_distances_w)
        relative_distances_d = torch.arange(self.max_absolute_positions[2], dtype=torch.long).reshape(1, -1) - torch.arange(self.max_absolute_positions[2], dtype=torch.long).reshape(-1, 1)
        relative_distances_d = torch.clamp(relative_distances_d + self.max_capped_distances[2] - 1, min=0, max=(self.max_capped_distances[2] - 1) * 2)
        self.register_buffer('relative_distances_d', relative_distances_d)
        # Learned weights (per head) to calculate score - similar to neural interpreter
        # Note: this variant is query independent (this replaces q in q^T * emb[diff]).
        self.weights_intra_instructions = nn.Parameter(nn.init.xavier_uniform_(torch.empty((heads, embedding_dim)),
                                                                               gain=nn.init.calculate_gain('linear')), requires_grad=True)
        self.weights_inter_instructions = nn.Parameter(nn.init.xavier_uniform_(torch.empty((heads, embedding_dim)),
                                                                               gain=nn.init.calculate_gain('linear')), requires_grad=True)
        self.weights_cross_inst_content = nn.Parameter(nn.init.xavier_uniform_(torch.empty((heads, embedding_dim)),
                                                                               gain=nn.init.calculate_gain('linear')), requires_grad=True)
        self.weights_cross_content_inst = nn.Parameter(nn.init.xavier_uniform_(torch.empty((heads, embedding_dim)),
                                                                               gain=nn.init.calculate_gain('linear')), requires_grad=True)
        self.weights_content_h = nn.Parameter(nn.init.xavier_uniform_(torch.empty((heads, embedding_dim)),
                                                                      gain=nn.init.calculate_gain('linear')), requires_grad=True)
        self.weights_content_w = nn.Parameter(nn.init.xavier_uniform_(torch.empty((heads, embedding_dim)),
                                                                      gain=nn.init.calculate_gain('linear')), requires_grad=True)
        self.weights_content_d = nn.Parameter(nn.init.xavier_uniform_(torch.empty((heads, embedding_dim)),
                                                                      gain=nn.init.calculate_gain('linear')), requires_grad=True)

    def forward(self, dim_q, dim_k, dim_i, dim_h, dim_w, dim_d, device: Optional[torch.device] = None):
        """
        Build the additive positional attention score matrix.

        :param dim_q: queries dim (dim_i + dim_h * dim_w * dim_d)
        :param dim_k: keys dim
        :param dim_i: actual instructions dim (n_categories * tokens_per_instruction; may be 0)
        :param dim_h: actual height dim
        :param dim_w: actual width dim
        :param dim_d: actual depth dim
        :param device: target device of the generated score tensor
        :return: additive attention scores [1, heads, dim_q, dim_k]
        """
        scores = torch.zeros((self.heads, dim_q, dim_k), dtype=torch.float, device=device)
        # Instruction related biases (matrices A, B, C).
        # Fix: skipped entirely for dim_i == 0 - previously the instruction embeddings were read
        # unconditionally, raising a NameError when no instructions were present.
        if dim_i > 0 and not self.no_bias_instructions:
            n_instruction_categories = dim_i // self.tokens_per_instruction
            assert n_instruction_categories > 0
            intra_mask = torch.eye(n_instruction_categories, dtype=torch.float, device=device).reshape(n_instruction_categories, 1, n_instruction_categories, 1, 1) \
                .expand(-1, self.tokens_per_instruction, -1, self.tokens_per_instruction, -1).reshape(dim_i, dim_i, 1)  # Block sparse mask [I, I, 1]
            if self.unique_token_bias:
                # Unique learnable positional embedding for all tokens (across all instructions)
                intra_instruction_embeddings = intra_mask * self.encoding_intra_instructions.repeat(n_instruction_categories, n_instruction_categories, 1)  # [I, I, C]
                inter_instruction_embeddings = (~intra_mask.bool()).float() * self.encoding_inter_instructions.repeat(n_instruction_categories, n_instruction_categories, 1)  # [I, I, C]
                cross_inst_content_embeddings = self.encoding_cross_inst_content.repeat(n_instruction_categories, 1, 1)
                cross_content_inst_embeddings = self.encoding_cross_content_inst.repeat(1, n_instruction_categories, 1)
            else:
                # Same learnable positional embedding for all tokens (across all instructions)
                intra_instruction_embeddings = intra_mask * self.encoding_intra_instructions.expand(dim_i, dim_i, -1)  # [I, I, C]
                inter_instruction_embeddings = (~intra_mask.bool()).float() * self.encoding_inter_instructions.expand(dim_i, dim_i, -1)  # [I, I, C]
                cross_inst_content_embeddings = self.encoding_cross_inst_content.expand(dim_i, -1, -1)
                cross_content_inst_embeddings = self.encoding_cross_content_inst.expand(-1, dim_i, -1)
            intra_instruction_scores = torch.einsum('h c, n m c -> h n m', self.weights_intra_instructions, intra_instruction_embeddings)  # [Heads, I, I]
            inter_instruction_scores = torch.einsum('h c, n m c -> h n m', self.weights_inter_instructions, inter_instruction_embeddings)  # [Heads, I, I]
            instruction_scores = intra_instruction_scores + inter_instruction_scores
            cross_inst_content_scores = torch.einsum('h c, n m c -> h n m', self.weights_cross_inst_content, cross_inst_content_embeddings)  # [Heads, I, 1]
            cross_content_inst_scores = torch.einsum('h c, n m c -> h n m', self.weights_cross_content_inst, cross_content_inst_embeddings)  # [Heads, 1, I]
            scores[:, :dim_i, :dim_i] = instruction_scores  # [Heads, I, I]. Matrix A
            scores[:, :dim_i, dim_i:] = cross_inst_content_scores.expand(-1, -1, dim_k - dim_i)  # [Heads, I, #Content]. Matrix B
            scores[:, dim_i:, :dim_i] = cross_content_inst_scores.expand(-1, dim_q - dim_i, -1)  # [Heads, #Content, I]. Matrix C
        # Content biases (matrix D): decomposed relative positions along h / w / d.
        if not self.no_bias_content:
            row_embeddings = self.encoding_content_h[self.relative_distances_h[:dim_h, :dim_h], :]  # [H, H, C]. Relative row positions
            col_embeddings = self.encoding_content_w[self.relative_distances_w[:dim_w, :dim_w], :]  # [W, W, C]. Relative column positions
            depth_embeddings = self.encoding_content_d[self.relative_distances_d[:dim_d, :dim_d], :]  # [D, D, C]. Relative depth positions
            row_scores = torch.einsum('h c, n m c -> h n m', self.weights_content_h, row_embeddings)  # [Heads, H, H]
            col_scores = torch.einsum('h c, n m c -> h n m', self.weights_content_w, col_embeddings)  # [Heads, W, W]
            depth_scores = torch.einsum('h c, n m c -> h n m', self.weights_content_d, depth_embeddings)  # [Heads, D, D]
            # Broadcast-add the per-axis scores to [Heads, H, W, D, H, W, D], average, then flatten
            # (equivalent to the former einops.rearrange formulation, but dependency-free).
            content_scores = row_scores[:, :, None, None, :, None, None] \
                + col_scores[:, None, :, None, None, :, None] \
                + depth_scores[:, None, None, :, None, None, :]
            content_scores /= 3
            content_scores = content_scores.reshape(self.heads, dim_h * dim_w * dim_d, dim_h * dim_w * dim_d)  # [Heads, #Content, #Content]
            scores[:, dim_i:, dim_i:] = content_scores  # [Heads, #Content, #Content]. Matrix D
        # (Pre-)scale scores
        scores *= self.inv_temperature
        return scores.unsqueeze(dim=0)  # [1, Heads, Q, K]

    def parameters_bias_content(self):
        # Parameters that belong to the content (relative position) bias.
        return [self.encoding_content_h, self.encoding_content_w, self.encoding_content_d,
                self.weights_content_h, self.weights_content_w, self.weights_content_d]

    def parameters_bias_instructions(self):
        # Parameters that belong to the instruction and cross biases.
        return [self.encoding_intra_instructions, self.encoding_inter_instructions, self.encoding_cross_content_inst, self.encoding_cross_inst_content,
                self.weights_intra_instructions, self.weights_inter_instructions, self.weights_cross_content_inst, self.weights_cross_inst_content]
class DeepInstructedAttentionPositionScores(nn.Module):
    """
    Only inst -> cont and relative positions are needed for this case (others are 0 since instructions are not further used).

    Produces additive attention biases [B, heads, dim_q, dim_k] for a sequence laid out as
    [instructions (dim_i) | content (dim_h * dim_w * dim_d)]; only matrices C (content -> instruction)
    and D (content -> content) are populated.
    """
    def __init__(self,
                 embedding_dim: int = 64,
                 heads: int = 4,
                 instruction_pool_size: int = 2,
                 tokens_per_instruction: int = 10,
                 separate_background: bool = True,  # First pool entry is a dedicated background instruction
                 max_absolute_positions: Sequence[int] = (64, 64, 1),  # Max absolute positions index
                 max_capped_distances: Sequence[int] = (64, 64, 1),  # Max capped relative distances
                 unique_instruction_bias: bool = True,  # Separate bias per instruction in the pool
                 unique_token_bias: bool = True,  # Separate bias per token of an instruction
                 no_bias_instructions: bool = False,  # Disables weights for instructions and cross biases.
                 no_bias_content: bool = False):  # Disables content weights
        super().__init__()
        self.heads = heads
        self.tokens_per_instruction = tokens_per_instruction
        self.separate_background = separate_background
        self.unique_instruction_bias = unique_instruction_bias
        self.unique_token_bias = unique_token_bias
        self.max_instructions = instruction_pool_size if unique_instruction_bias else 1
        self.max_token_positions = tokens_per_instruction if unique_token_bias else 1
        self.max_absolute_positions = max_absolute_positions
        self.max_capped_distances = max_capped_distances
        self.no_bias_instructions = no_bias_instructions
        self.no_bias_content = no_bias_content
        self.embedding_dim = embedding_dim
        self.inv_temperature = embedding_dim ** -0.5
        # Learned encoding - one [max_token_positions, C] entry per (unique) instruction
        self.encoding_cross_inst_content = nn.ParameterList()
        for _ in range(self.max_instructions):
            self.encoding_cross_inst_content.append(nn.Parameter(nn.init.xavier_uniform_(torch.empty((self.max_token_positions, embedding_dim)),
                                                                                         gain=nn.init.calculate_gain('linear')), requires_grad=True))
        self.encoding_content_h = nn.Parameter(nn.init.xavier_uniform_(torch.empty((2 * self.max_capped_distances[0] - 1, embedding_dim)),
                                                                       gain=nn.init.calculate_gain('linear')), requires_grad=True)
        self.encoding_content_w = nn.Parameter(nn.init.xavier_uniform_(torch.empty((2 * self.max_capped_distances[1] - 1, embedding_dim)),
                                                                       gain=nn.init.calculate_gain('linear')), requires_grad=True)
        self.encoding_content_d = nn.Parameter(nn.init.xavier_uniform_(torch.empty((2 * self.max_capped_distances[2] - 1, embedding_dim)),
                                                                       gain=nn.init.calculate_gain('linear')), requires_grad=True)
        # To be encountered relative distances - precomputed, clamped index maps into the content encodings
        relative_distances_h = torch.arange(self.max_absolute_positions[0], dtype=torch.long).reshape(1, -1) - torch.arange(self.max_absolute_positions[0], dtype=torch.long).reshape(-1, 1)
        relative_distances_h = torch.clamp(relative_distances_h + self.max_capped_distances[0] - 1, min=0, max=(self.max_capped_distances[0] - 1) * 2)
        self.register_buffer('relative_distances_h', relative_distances_h)
        relative_distances_w = torch.arange(self.max_absolute_positions[1], dtype=torch.long).reshape(1, -1) - torch.arange(self.max_absolute_positions[1], dtype=torch.long).reshape(-1, 1)
        relative_distances_w = torch.clamp(relative_distances_w + self.max_capped_distances[1] - 1, min=0, max=(self.max_capped_distances[1] - 1) * 2)
        self.register_buffer('relative_distances_w', relative_distances_w)
        relative_distances_d = torch.arange(self.max_absolute_positions[2], dtype=torch.long).reshape(1, -1) - torch.arange(self.max_absolute_positions[2], dtype=torch.long).reshape(-1, 1)
        relative_distances_d = torch.clamp(relative_distances_d + self.max_capped_distances[2] - 1, min=0, max=(self.max_capped_distances[2] - 1) * 2)
        self.register_buffer('relative_distances_d', relative_distances_d)
        # Learned weights (per head) to calculate score - similar to neural interpreter
        # Note: this variant is query independent (this replaces q in q^T * emb[diff]).
        self.weights_cross_inst_content = nn.Parameter(nn.init.xavier_uniform_(torch.empty((heads, embedding_dim)),
                                                                               gain=nn.init.calculate_gain('linear')), requires_grad=True)
        self.weights_content_h = nn.Parameter(nn.init.xavier_uniform_(torch.empty((heads, embedding_dim)),
                                                                      gain=nn.init.calculate_gain('linear')), requires_grad=True)
        self.weights_content_w = nn.Parameter(nn.init.xavier_uniform_(torch.empty((heads, embedding_dim)),
                                                                      gain=nn.init.calculate_gain('linear')), requires_grad=True)
        self.weights_content_d = nn.Parameter(nn.init.xavier_uniform_(torch.empty((heads, embedding_dim)),
                                                                      gain=nn.init.calculate_gain('linear')), requires_grad=True)

    def forward(self, dim_q, dim_k, dim_i, dim_h, dim_w, dim_d, label_indices: Optional[torch.Tensor] = None, device: Optional[torch.device] = None):
        """
        :param dim_q: queries dim
        :param dim_k: keys dim
        :param dim_i: actual instructions dim (may be 0)
        :param dim_h: actual height dim
        :param dim_w: actual width dim
        :param dim_d: actual depth dim
        :param label_indices: optional [B, instruction_pool_size] multi-hot mask of active instructions per batch element
        :param device: target device of the generated score tensor
        :return: additive attention scores [B, heads, dim_q, dim_k] (B = 1 without label_indices)
        """
        # Retrieve embeddings according to relative / absolute / categorical position
        n_instruction_categories = dim_i // self.tokens_per_instruction
        if dim_i > 0:
            assert n_instruction_categories > 0
            if self.unique_instruction_bias:
                if label_indices is not None:
                    encodings_ = list()
                    for idx_batch in range(label_indices.shape[0]):
                        if self.separate_background:
                            instruction_indices_true = torch.nonzero(label_indices[idx_batch, 1:], as_tuple=False).squeeze(dim=1) + 1  # All indices with designated background instruction ignored
                        else:
                            instruction_indices_true = torch.nonzero(label_indices[idx_batch, :], as_tuple=False).squeeze(dim=1)  # All indices including shared background
                        # Fix: nn.ParameterList only supports int / slice indexing - gather the selected
                        # entries one by one (the former tensor index raised a TypeError for > 1 entry).
                        # Assumes >= 1 active instruction per element and an equal count across the batch.
                        encoding_ = torch.cat([self.encoding_cross_inst_content[int(idx_)] for idx_ in instruction_indices_true], dim=0)
                        encodings_.append(encoding_)  # [I_active * T', C]
                    encodings_ = torch.stack(encodings_, dim=0)  # [B, I_active * T', C]
                else:
                    # Fix: the former single-category branch returned a raw ParameterList (no tensor, no batch dim).
                    encodings_ = torch.cat(list(self.encoding_cross_inst_content[:n_instruction_categories]), dim=0).unsqueeze(dim=0)  # [1, I_active * T', C]
                if self.unique_token_bias:
                    # Unique learnable positional embedding for all tokens and all instructions
                    cross_inst_content_embeddings = encodings_  # [B, I, C]
                else:
                    # One embedding per instruction (T' == 1) - repeat it for each of its tokens.
                    # Fix: repeat_interleave was previously called with swapped / superfluous arguments.
                    cross_inst_content_embeddings = encodings_.repeat_interleave(self.tokens_per_instruction, dim=1)  # [B, I, C]
            else:
                # A single shared instruction encoding (label_indices is irrelevant in this configuration).
                if self.unique_token_bias:
                    cross_inst_content_embeddings = self.encoding_cross_inst_content[0].unsqueeze(dim=0).repeat(1, n_instruction_categories, 1)  # [1, I, C]
                else:
                    # Same learnable positional embedding for all tokens (across all instructions)
                    cross_inst_content_embeddings = self.encoding_cross_inst_content[0].unsqueeze(dim=0).expand(-1, dim_i, -1)  # [1, I, C]
        else:
            warnings.warn(f'Using empty bias score.')
            cross_inst_content_embeddings = torch.zeros((1, dim_i, self.embedding_dim), dtype=torch.float, device=device)
        row_embeddings = self.encoding_content_h[self.relative_distances_h[:dim_h, :dim_h], :]  # [H, H, C]. Relative row positions
        col_embeddings = self.encoding_content_w[self.relative_distances_w[:dim_w, :dim_w], :]  # [W, W, C]. Relative column positions
        depth_embeddings = self.encoding_content_d[self.relative_distances_d[:dim_d, :dim_d], :]  # [D, D, C]. Relative depth positions
        cross_inst_content_scores = torch.einsum('h c, b n c -> b h n', self.weights_cross_inst_content, cross_inst_content_embeddings)  # [B, Heads, I]
        row_scores = torch.einsum('h c, n m c -> h n m', self.weights_content_h, row_embeddings)  # [Heads, H, H]
        col_scores = torch.einsum('h c, n m c -> h n m', self.weights_content_w, col_embeddings)  # [Heads, W, W]
        depth_scores = torch.einsum('h c, n m c -> h n m', self.weights_content_d, depth_embeddings)  # [Heads, D, D]
        # Broadcast-add the per-axis scores to [1, Heads, H, W, D, H, W, D], average, then flatten
        # (equivalent to the former einops.rearrange formulation, but dependency-free).
        content_scores = row_scores[None, :, :, None, None, :, None, None] \
            + col_scores[None, :, None, :, None, None, :, None] \
            + depth_scores[None, :, None, None, :, None, None, :]
        content_scores /= 3
        content_scores = content_scores.reshape(1, self.heads, dim_h * dim_w * dim_d, dim_h * dim_w * dim_d)  # [1, Heads, #Content, #Content]
        # Attention score matrix
        # A (0) | B (0)
        # - - -
        # C     | D
        scores = torch.zeros((cross_inst_content_scores.shape[0], self.heads, dim_q, dim_k), dtype=torch.float, device=device)
        if not self.no_bias_instructions:
            # Fix: expand with the query (row) count, not the key count (only equal for self-attention).
            scores[:, :, dim_i:, :dim_i] = cross_inst_content_scores.unsqueeze(dim=-2).expand(-1, -1, dim_q - dim_i, -1)  # [B, Heads, #Content, I]. Matrix C
        if not self.no_bias_content:
            scores[:, :, dim_i:, dim_i:] = content_scores  # [B, Heads, #Content, #Content]. Matrix D
        # (Pre-)scale scores
        scores *= self.inv_temperature
        return scores

    def named_parameters_bias_content(self):
        # (name, parameter) pairs of the content (relative position) bias.
        return [(name_, param_) for name_, param_ in self.named_parameters() if any([str_ in name_ for str_ in ['encoding_content', 'weights_content']])]

    def named_parameters_bias_instructions(self):
        # (name, parameter) pairs of the instruction bias.
        return [(name_, param_) for name_, param_ in self.named_parameters() if any([str_ in name_ for str_ in ['encoding_cross_inst_content', 'weights_cross_inst_content']])]
| Python |
3D | marcdcfischer/PUNet | src/modules/blocks/sia_up_block_deep.py | .py | 4,587 | 93 | import torch
import torch.nn as nn
import torch.nn.functional as F
from src.modules.blocks.sia_block_deep import DeepShiftedInstructedAttentionBlock
from typing import Sequence, Optional, List
from monai.networks.blocks import Convolution
from monai.networks.layers.utils import get_act_layer, get_norm_layer
from itertools import chain
class DeepSIAUpBlock(nn.Module):
    """
    Decoder up-block: trilinearly upsamples the lower-resolution features by `strides`,
    concatenates them with the skip features, fuses the result with a norm + act + conv,
    and finally refines it with a deep shifted instructed attention (SIA) block.
    """
    def __init__(self,
                 channels_in: int,
                 channels_out: int,
                 instruction_pool_size: int,
                 tokens_per_instruction: int,
                 separate_background: bool = True,
                 kernel_size: Sequence[int] = (3, 3, 1),
                 strides: Sequence[float] = (2, 2, 1),
                 heads: int = 4,
                 window_size: Sequence[int] = (8, 8, 1),
                 act: str = "leakyrelu",
                 norm: str = "batch",
                 unique_instruction_bias: bool = True,
                 unique_token_bias: bool = True,
                 no_bias_instructions: bool = False,
                 no_bias_content: bool = False,
                 adapter: bool = False):
        super().__init__()
        self.channels_in = channels_in
        self.channels_out = channels_out
        # Upsampling by the given per-axis factors (trilinear, no corner alignment).
        self.up = nn.Upsample(scale_factor=strides, mode='trilinear', align_corners=False)
        self.act = get_act_layer(name=act)
        # Norm over the concatenated (upsampled + skip) channels, applied before the fusion conv.
        self.norm_concat = get_norm_layer(name=norm, spatial_dims=3, channels=self.channels_in + self.channels_out)
        # Fusion conv reducing channels_in + channels_out -> channels_out (conv_only: norm/act handled above).
        self.conv_concat = Convolution(spatial_dims=3,
                                       in_channels=self.channels_in + self.channels_out,
                                       out_channels=self.channels_out,
                                       kernel_size=kernel_size,
                                       strides=(1, 1, 1),
                                       act="leakyrelu",
                                       norm="batch",
                                       is_transposed=False,
                                       conv_only=True)
        # Windowed instructed attention at the target resolution.
        self.sia = DeepShiftedInstructedAttentionBlock(hidden_channels=self.channels_out,
                                                       instruction_pool_size=instruction_pool_size,
                                                       tokens_per_instruction=tokens_per_instruction,
                                                       separate_background=separate_background,
                                                       heads=heads,
                                                       window_size=window_size,
                                                       unique_instruction_bias=unique_instruction_bias,
                                                       unique_token_bias=unique_token_bias,
                                                       no_bias_instructions=no_bias_instructions,
                                                       no_bias_content=no_bias_content,
                                                       adapter=adapter)

    def forward(self, x: torch.Tensor, x_skips: torch.Tensor, x_instructions: Optional[List[torch.Tensor]] = None, label_indices: Optional[torch.Tensor] = None):
        """
        :param x: lower-resolution feature map [B, channels_in, h, w, d]
        :param x_skips: skip features at the target resolution [B, channels_out, H, W, D]
        :param x_instructions: optional instruction token tensors, forwarded to the SIA block
        :param label_indices: optional per-batch instruction selection, forwarded to the SIA block
        :return: fused, attention-refined features at the skip resolution
        """
        # Upsample lower res content
        x = self.up(x)
        # Concat and linear / conv projection of channels
        # Upsampling may overshoot each skip dim by at most one voxel (odd input sizes).
        assert all([x_ - y_ <= 1 for x_, y_ in zip(x.shape[2:], x_skips.shape[2:])])
        x = self.conv_concat(self.act(self.norm_concat(torch.cat([x[...,
                                                                  :x_skips.shape[2],
                                                                  :x_skips.shape[3],
                                                                  :x_skips.shape[4]], x_skips], dim=1))))  # cropped if upsampled version is too large.
        # SIA
        x = self.sia(x, x_instructions, label_indices=label_indices)
        return x

    def named_parameters_body(self):
        # Backbone parameters: fusion conv / norm, SIA attention and its content biases.
        parameters_down = list(chain(*[self.conv_concat.named_parameters(), self.norm_concat.named_parameters()]))
        parameters_sia_att = self.sia.named_parameters_attention()
        parameters_bias_content = self.sia.named_parameters_bias_content()
        return list(chain(*[parameters_down, parameters_sia_att, parameters_bias_content]))

    def named_parameters_adapter(self):
        # Adapter parameters of the SIA block only.
        return self.sia.named_parameters_adapter()

    def named_parameters_bias_instructions(self):
        # Instruction-bias parameters of the SIA block only.
        parameters_bias_instructions = list(self.sia.named_parameters_bias_instructions())
        return parameters_bias_instructions
| Python |
3D | marcdcfischer/PUNet | src/modules/blocks/attention.py | .py | 5,610 | 132 | import torch
from torch import nn
from typing import Optional
import einops
from torch.utils.checkpoint import checkpoint
class Attention(nn.Module):
    """Standard multi-head (cross-)attention with optional additive per-head positional terms on q / k."""

    def __init__(self,
                 q_channels: int,
                 kv_channels: Optional[int] = None,
                 heads_channels: Optional[int] = None,
                 out_channels: Optional[int] = None,
                 heads: int = 4):
        super().__init__()
        # Fall back to the query width for any unspecified channel count.
        if kv_channels is None:
            kv_channels = q_channels
        if heads_channels is None:
            heads_channels = q_channels
        if out_channels is None:
            out_channels = q_channels
        inner_channels = heads_channels
        assert inner_channels % heads == 0
        # NOTE(review): the softmax temperature uses the full inner width, not the per-head
        # width (cf. WindowedMaskedAttention, which uses q_channels // heads) - confirm intended.
        self.scale = heads_channels ** -0.5
        self.heads = heads
        self.to_q = nn.Linear(q_channels, inner_channels, bias=False)
        self.to_k = nn.Linear(kv_channels, inner_channels, bias=False)
        self.to_v = nn.Linear(kv_channels, inner_channels, bias=False)
        self.to_out = nn.Linear(inner_channels, out_channels)

    def forward(self, q: torch.Tensor, kv: torch.Tensor, pos_q: Optional[torch.Tensor] = None, pos_k: Optional[torch.Tensor] = None):
        """Attend q [B, N_q, C_q] over kv [B, N_kv, C_kv]; pos_q / pos_k are optional additive position embeddings [B, N, head_dim]."""
        batch, n_q = q.shape[0], q.shape[1]
        n_kv = kv.shape[1]
        head_dim = self.to_q.out_features // self.heads
        # Project, then split channels into heads: [B, N, H * D] -> [B, H, N, D]
        q_ = self.to_q(q).view(batch, n_q, self.heads, head_dim).transpose(1, 2)
        k_ = self.to_k(kv).view(batch, n_kv, self.heads, head_dim).transpose(1, 2)
        v_ = self.to_v(kv).view(batch, n_kv, self.heads, head_dim).transpose(1, 2)
        # Positional terms are broadcast over the head axis.
        if pos_q is not None:
            q_ = q_ + pos_q.unsqueeze(1)
        if pos_k is not None:
            k_ = k_ + pos_k.unsqueeze(1)
        # Scaled dot-product attention.
        sim = torch.matmul(q_, k_.transpose(-2, -1)) * self.scale
        attn_weights = sim.softmax(dim=-1)
        out = torch.matmul(attn_weights, v_)
        # Merge heads back: [B, H, N, D] -> [B, N, H * D]
        out = out.transpose(1, 2).reshape(batch, n_q, -1)
        return self.to_out(out)
class WindowedMaskedAttention(nn.Module):
    """
    Multi-head attention over windowed (patched) token tensors [B, P, N, C], with optional
    separate pre-norms for q / k / v, optional additive positional terms, an optional
    attention mask and (optionally gradient-checkpointed) score computation.
    """
    def __init__(self,
                 q_channels: int,
                 kv_channels: Optional[int] = None,
                 heads_channels: Optional[int] = None,
                 out_channels: Optional[int] = None,
                 separate_norms: bool = False,
                 heads: int = 4,
                 use_checkpoint: bool = True):
        super().__init__()
        kv_channels = kv_channels if kv_channels is not None else q_channels
        heads_channels = heads_channels if heads_channels is not None else q_channels // heads  # per-head width, used for the softmax temperature
        out_channels = out_channels if out_channels is not None else q_channels
        inner_channels = q_channels
        assert inner_channels % heads == 0
        self.use_checkpoint = use_checkpoint
        self.heads = heads
        self.eps = 1e-8
        self.pre_norm_q, self.pre_norm_k, self.pre_norm_v = None, None, None
        if separate_norms:
            self.pre_norm_q = nn.LayerNorm(q_channels)
            self.pre_norm_k = nn.LayerNorm(kv_channels)
            self.pre_norm_v = nn.LayerNorm(kv_channels)
        self.to_q = nn.Linear(q_channels, inner_channels, bias=False)
        self.to_k = nn.Linear(kv_channels, inner_channels, bias=False)
        self.to_v = nn.Linear(kv_channels, inner_channels, bias=False)
        self.masked_attention_calc = WindowedMaskedAttentionCalculation(inv_temperature=heads_channels ** -0.5)
        self.to_out = nn.Linear(inner_channels, out_channels)

    def forward(self,
                q: torch.Tensor,
                k: torch.Tensor,
                v: torch.Tensor,
                mask: Optional[torch.Tensor] = None,
                pos_q: Optional[torch.Tensor] = None,
                pos_k: Optional[torch.Tensor] = None,
                pos_scores: Optional[torch.Tensor] = None):
        """
        :param q: queries [B, P, N_q, q_channels]
        :param k: keys [B, P, N_k, kv_channels]
        :param v: values [B, P, N_k, kv_channels]
        :param mask: optional attention mask, forwarded to WindowedMaskedAttentionCalculation
        :param pos_q: optional additive positional term for q (per head)
        :param pos_k: optional additive positional term for k (per head)
        :param pos_scores: optional pre-scaled additive attention scores
        :return: attended values [B, P, N_q, out_channels]
        """
        # Transform to q, k, v and add pos info.
        # Fix: the non-normed fallbacks previously projected q for all three of q / k / v,
        # silently ignoring the passed k and v tensors.
        q = self.to_q(self.pre_norm_q(q)) if self.pre_norm_q is not None else self.to_q(q)
        k = self.to_k(self.pre_norm_k(k)) if self.pre_norm_k is not None else self.to_k(k)
        v = self.to_v(self.pre_norm_v(v)) if self.pre_norm_v is not None else self.to_v(v)
        # Rearrange
        q, k, v = map(lambda t: einops.rearrange(t, 'b p n (h d) -> b p h n d', h=self.heads), (q, k, v))
        # Add pos
        q = q + pos_q if pos_q is not None else q
        k = k + pos_k if pos_k is not None else k
        # Masked Attention (optionally checkpointed to trade recomputation for activation memory)
        if self.use_checkpoint:
            out = checkpoint(self.masked_attention_calc, q, k, v, pos_scores, mask, preserve_rng_state=False, use_reentrant=False)
        else:
            out = self.masked_attention_calc(q, k, v, pos_scores, mask)
        out = einops.rearrange(out, 'b p h n d -> b p n (h d)', h=self.heads)
        out = self.to_out(out)
        return out
class WindowedMaskedAttentionCalculation(nn.Module):
    """Scaled dot-product attention for windowed tensors [B, P, H, N, D] with optional additive position scores and mask."""

    def __init__(self,
                 inv_temperature: float):
        super().__init__()
        self.inv_temperature = inv_temperature

    def forward(self, q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, pos_scores: Optional[torch.Tensor] = None, mask: Optional[torch.Tensor] = None):
        # Per-window similarities: [B, P, H, I, D] x [B, P, H, J, D] -> [B, P, H, I, J]
        logits = torch.matmul(q, k.transpose(-2, -1)) * self.inv_temperature  # scaled before pos_scores (those are already pre-scaled)
        if pos_scores is not None:
            logits = logits + pos_scores
        if mask is not None:
            # NOTE(review): multiplicative masking zeroes logits instead of setting them to -inf,
            # so masked positions still receive softmax weight exp(0) - confirm this is intended.
            logits = mask * logits
        attn_weights = logits.softmax(dim=-1)
        return torch.matmul(attn_weights, v)  # [B, P, H, I, D]
| Python |
3D | marcdcfischer/PUNet | src/modules/losses/__init__.py | .py | 0 | 0 | null | Python |
3D | marcdcfischer/PUNet | src/modules/losses/contrastive_protos_teacher.py | .py | 18,421 | 241 | import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Union, Dict, Tuple, List, Optional
import math
import einops
import numpy as np
class ContrastiveProtosTeacherLoss(nn.Module):
    def __init__(self,
                 reduction_factor: float = 4.,  # Grid sampling to make loss calc feasible
                 reduction_factor_protos: float = 16.,  # Undersampling factor for prototype seeds
                 loss_weight: float = 1.0,
                 k_means_iterations: int = 3,
                 use_weighting_protos: bool = True,
                 use_weighting_teacher: bool = False,
                 fwhm_student_teacher: float = 128.,
                 fwhm_teacher_protos: float = 128.):
        """
        Contrastive student-teacher prototype loss (configuration only; computation happens in forward).

        :param reduction_factor: grid down-sampling factor applied to student / teacher embeddings to keep the loss tractable.
        :param reduction_factor_protos: coarser down-sampling factor used when sampling prototype seed positions.
        :param loss_weight: scalar weight of this loss term.
        :param k_means_iterations: number of k-means iterations - presumably used to refine the prototype seeds; verify in forward (outside this view).
        :param use_weighting_protos: enables weighting between teacher and prototypes - presumably Gaussian with fwhm_teacher_protos; verify in forward.
        :param use_weighting_teacher: enables weighting between student and teacher - presumably Gaussian with fwhm_student_teacher; verify in forward.
        :param fwhm_student_teacher: full width at half maximum of the student-teacher weighting.
        :param fwhm_teacher_protos: full width at half maximum of the teacher-prototype weighting.
        """
        super().__init__()
        self.reduction_factor = reduction_factor
        self.reduction_factor_protos = reduction_factor_protos
        self.loss_weight = loss_weight
        self.k_means_iterations = k_means_iterations
        self.use_weighting_protos = use_weighting_protos
        self.use_weighting_teacher = use_weighting_teacher
        self.fwhm_student_teacher = fwhm_student_teacher
        self.fwhm_teacher_protos = fwhm_teacher_protos
def forward(self,
embeddings_students: List[torch.Tensor],
embeddings_teacher: torch.Tensor,
frames: torch.Tensor,
coord_grids_students: List[torch.Tensor],
coord_grids_teacher: torch.Tensor,
temp_proto_teacher: float = 0.033, # atm hardcoded
temp_proto_student: float = 0.066, # atm hardcoded
dropout_rate: float = 0.2):
"""
:param embeddings_students: [B, C, H, W, D]
:param embeddings_teacher: [B, C, H, W, D]
:param frames:
:param coord_grids_students:
:param coord_grids_teacher:
:param dropout_rate:
:return:
"""
losses, plots = dict(), dict()
device_ = embeddings_students[0].device
n_batch, n_channels = embeddings_students[0].shape[0], embeddings_students[0].shape[1]
n_students = len(embeddings_students)
# Sample seeds for prototype clustering (atm on a grid)
with torch.no_grad():
theta = torch.tensor([[1., 0., 0., 0.], [0., 1., 0., 0.], [0., 0., 1., 0.]], device=device_).unsqueeze(0)
reduced_size = [max(int(s_ // self.reduction_factor_protos), 1) for s_ in embeddings_teacher.shape[2:]] # [3]
affine_grids = F.affine_grid(theta=theta, size=[1, 1, *reduced_size], align_corners=False).expand(n_batch, -1, -1, -1, -1) # [B, H', W', D', 3]
embeddings_teacher_sampled = F.grid_sample(embeddings_teacher, grid=affine_grids, mode='bilinear', padding_mode='reflection', align_corners=False) # [B, C, H', W', D']
coord_grids_teacher_sampled = F.grid_sample(coord_grids_teacher, grid=affine_grids, mode='bilinear', padding_mode='reflection', align_corners=False) # [B, 3, H', W', D']
n_patch_sampled = math.prod(embeddings_teacher_sampled.shape[2:])
# (Down-)sample student and teacher embeddings
embeddings_students_reduced, coord_grids_students_reduced = [None for _ in range(len(embeddings_students))], [None for _ in range(len(embeddings_students))]
embeddings_teacher_reduced, coord_grids_teacher_reduced = None, None
for idx_emb, tuple_emb_grid_ in enumerate(zip(embeddings_students + [embeddings_teacher], coord_grids_students + [coord_grids_teacher])):
with torch.no_grad():
theta = torch.tensor([[1., 0., 0., 0.], [0., 1., 0., 0.], [0., 0., 1., 0.]], device=device_).unsqueeze(0)
reduced_size = [max(int(s_ // self.reduction_factor), 1) for s_ in tuple_emb_grid_[0].shape[2:]] # [3]
affine_grids = F.affine_grid(theta=theta, size=[1, 1, *reduced_size], align_corners=False).expand(n_batch, -1, -1, -1, -1) # [B, H', W', D', 3]
spatial_jitter = torch.randint(low=0, high=int(math.ceil(self.reduction_factor)), size=(4,))
if idx_emb < n_students:
embeddings_ = tuple_emb_grid_[0][:, :, spatial_jitter[0]: tuple_emb_grid_[0].shape[2] - spatial_jitter[1], spatial_jitter[2]: tuple_emb_grid_[0].shape[3] - spatial_jitter[3], :]
coord_grids_ = tuple_emb_grid_[1][:, :, spatial_jitter[0]: tuple_emb_grid_[1].shape[2] - spatial_jitter[1], spatial_jitter[2]: tuple_emb_grid_[1].shape[3] - spatial_jitter[3], :]
else:
embeddings_ = tuple_emb_grid_[0]
coord_grids_ = tuple_emb_grid_[1]
embeddings_ = F.grid_sample(embeddings_, grid=affine_grids, mode='bilinear', padding_mode='reflection', align_corners=False) # [B, C, H', W', D']
coord_grids_ = F.grid_sample(coord_grids_, grid=affine_grids, mode='bilinear', padding_mode='reflection', align_corners=False)
if idx_emb < n_students:
embeddings_students_reduced[idx_emb] = embeddings_
coord_grids_students_reduced[idx_emb] = coord_grids_
else:
embeddings_teacher_reduced = embeddings_
coord_grids_teacher_reduced = coord_grids_
# Contrastive losses
loss_sim_clustered = [torch.zeros((0,), device=device_) for _ in range(n_students)]
unique_frames = np.unique(frames)
for idx_frame, frame_ in enumerate(unique_frames): # Note: This differentiation is not needed anymore, but may be helpful in the future for optional losses.
valid_entries = np.array(frames) == frame_
n_valid = np.count_nonzero(valid_entries)
# if n_valid > 1:
# Generate masks
with torch.no_grad():
pos_weights_student_teacher, indices_closest, mask_max_sim_dist = generate_masks_student_teacher(
coord_grids_student=[x_[valid_entries, ...] for x_ in coord_grids_students_reduced],
coord_grids_teacher=coord_grids_teacher_reduced[valid_entries, ...],
embedding_size=[x_.shape[2:] for x_ in embeddings_students_reduced],
embedding_size_teacher=embeddings_teacher_reduced.shape[2:],
fwhm=self.fwhm_student_teacher,
)
# plots['pos_masks_student_proto'] = pos_weights_teacher_protos.reshape(n_valid, *embeddings_teacher.shape[2:], *embeddings_teacher_sampled.shape[2:])
# Std contrastive learning to prototype targets
# Retrieve proxy samples (that serve as proxy targets)
# Note: we use a soft-assignment of prototypes chosen on a grid (as seeds / surrogates)
with torch.no_grad():
embeddings_teacher_valid = einops.rearrange(embeddings_teacher_reduced[valid_entries, ...], 'v c h w d -> v (h w d) c') # [B, N, C]
embeddings_teacher_valid_normed = F.normalize(embeddings_teacher_valid, p=2, dim=-1)
embeddings_teacher_sampled_valid = einops.rearrange(embeddings_teacher_sampled[valid_entries, ...], 'v c h w d -> v (h w d) c') # [V, P, C]
embeddings_teacher_sampled_valid_normed = F.normalize(embeddings_teacher_sampled_valid, p=2, dim=-1)
# Calc protos by soft k-means
embeddings_protos_valid_normed = embeddings_teacher_sampled_valid_normed
coords_protos = einops.rearrange(coord_grids_teacher_sampled[valid_entries, ...], 'v c h w d -> v (h w d) c') # [V, P, 3]
for idx_itr in range(self.k_means_iterations):
# Calc alignment
sim_emb_emb_teacher_protos = torch.einsum('v n c, v p c -> v n p', embeddings_teacher_valid_normed, embeddings_protos_valid_normed) # [V, N, P]. Similarities between all teacher elements and sampled ones
sim_emb_emb_teacher_protos_soft = torch.softmax(sim_emb_emb_teacher_protos / temp_proto_teacher, dim=-1) # Cluster alignment
# Calc position weights
pos_weights_teacher_protos = generate_masks_teacher_protos(
coord_grids_teacher=coord_grids_teacher_reduced[valid_entries, ...],
coord_grids_protos=coords_protos,
embedding_size_teacher=embeddings_teacher_reduced.shape[2:],
fwhm=self.fwhm_teacher_protos,
)
sim_emb_emb_teacher_teacher_sampled_soft_weighted = sim_emb_emb_teacher_protos_soft * pos_weights_teacher_protos if self.use_weighting_protos else sim_emb_emb_teacher_protos_soft
# Aggregate new protos and coords
embeddings_protos_valid = torch.einsum('v n p, v n c -> v p c', sim_emb_emb_teacher_teacher_sampled_soft_weighted, embeddings_teacher_valid)\
/ torch.sum(sim_emb_emb_teacher_teacher_sampled_soft_weighted, dim=1).unsqueeze(-1) # [V, P, C] / [V, P, 1]. Denominator is not rly needed (if it is renormalized directly afterwards)
embeddings_protos_valid_normed = F.normalize(embeddings_protos_valid, p=2, dim=-1) # [V, P, C]
coords_protos = torch.einsum('v n p, v n c -> v p c', sim_emb_emb_teacher_teacher_sampled_soft_weighted, einops.rearrange(coord_grids_teacher_reduced[valid_entries, ...], 'v c h w d -> v (h w d) c'))\
/ torch.sum(sim_emb_emb_teacher_teacher_sampled_soft_weighted, dim=1).unsqueeze(-1) # [V, P, C] / [V, P, 1]
# Recalc teacher proxy alignment
sim_emb_emb_teacher_proxy = torch.einsum('v n c, v p c -> v n p', embeddings_teacher_valid_normed, embeddings_protos_valid_normed)
sim_emb_emb_teacher_proxy_soft = torch.softmax(sim_emb_emb_teacher_proxy / temp_proto_teacher, dim=-1) # [V, N, P]
pos_weights_teacher_protos = generate_masks_teacher_protos(
coord_grids_teacher=coord_grids_teacher_reduced[valid_entries, ...],
coord_grids_protos=coords_protos,
embedding_size_teacher=embeddings_teacher_reduced.shape[2:],
fwhm=self.fwhm_teacher_protos,
)
sim_emb_emb_teacher_proxy_soft_final = sim_emb_emb_teacher_proxy_soft * pos_weights_teacher_protos if self.use_weighting_teacher else sim_emb_emb_teacher_proxy_soft
if idx_frame == 0:
assignments = einops.rearrange(sim_emb_emb_teacher_proxy_soft_final, 'v (h w d) p -> v p h w d', h=embeddings_teacher_reduced.shape[2], w=embeddings_teacher_reduced.shape[3], d=embeddings_teacher_reduced.shape[4])
assignments_nonweighted = einops.rearrange(sim_emb_emb_teacher_proxy_soft, 'v (h w d) p -> v p h w d', h=embeddings_teacher_reduced.shape[2], w=embeddings_teacher_reduced.shape[3], d=embeddings_teacher_reduced.shape[4])
plots['sim_teacher_proxy'] = F.interpolate(assignments[..., assignments.shape[-1] // 2: assignments.shape[-1] // 2 + 1], scale_factor=(self.reduction_factor, self.reduction_factor, 1))
plots['sim_teacher_proxy_nonweighted'] = F.interpolate(assignments_nonweighted[..., assignments_nonweighted.shape[-1] // 2: assignments_nonweighted.shape[-1] // 2 + 1], scale_factor=(self.reduction_factor, self.reduction_factor, 1))
for idx_student in range(n_students):
embeddings_student_valid_ = einops.rearrange(embeddings_students_reduced[idx_student][valid_entries, ...], 'v c h w d -> v (h w d) c') # [B, N, C]
embeddings_student_valid_normed_ = F.normalize(embeddings_student_valid_, p=2, dim=-1) # [B, C, H, W, D]. Normalize (for cosine sim)
sim_emb_emb_student_proxy_ = torch.einsum('v n c, v p c -> v n p', embeddings_student_valid_normed_, embeddings_protos_valid_normed) # [V, N, P]
sim_emb_emb_student_proxy_soft_ = torch.softmax(sim_emb_emb_student_proxy_ / temp_proto_student, dim=-1)
# sim_emb_emb_student_proxy_soft_weighted = sim_emb_emb_student_proxy_soft_ * pos_weights_student_protos if self.use_weighting else sim_emb_emb_student_proxy_soft_
# Loss calc
for idx_valid in range(n_valid):
if any(mask_max_sim_dist[idx_student][idx_valid, ...]):
sim_emb_emb_soft_selected_ = sim_emb_emb_student_proxy_soft_[idx_valid, ...][mask_max_sim_dist[idx_student][idx_valid, :], :] # [N_sel, P]
cluster_assignments_selected_ = sim_emb_emb_teacher_proxy_soft_final[idx_valid, ...][indices_closest[idx_student][idx_valid, :], :][mask_max_sim_dist[idx_student][idx_valid, :], :] # [N_sel, P]. Take the closest teacher->proxy assignment for each student position.
ce_clustered = - (cluster_assignments_selected_ * torch.clamp(torch.log(sim_emb_emb_soft_selected_ + 1e-16), min=-1e3, max=-0.)).sum(dim=1).mean(dim=0)
entropy_all = ce_clustered # Currently, mean entropy isn't included.
loss_sim_clustered[idx_student] = torch.concat([loss_sim_clustered[idx_student], entropy_all.reshape(-1)])
for idx_student in range(n_students):
losses[f'contrastive_proxy_sim_clustered_s{idx_student}'] = self.loss_weight * loss_sim_clustered[idx_student].mean() if loss_sim_clustered[idx_student].shape[0] > 0 else torch.tensor(0., device=device_)
with torch.no_grad():
# Calculate plotted similarities - across non-pairs (for illustration)
# Position is atm hardcoded
embeddings_plot_candidates = embeddings_students[idx_student][...,
embeddings_students[idx_student].shape[2] // 2 - 5: embeddings_students[idx_student].shape[2] // 2 + 5,
embeddings_students[idx_student].shape[3] // 2 - 5: embeddings_students[idx_student].shape[3] // 2 + 5,
# embeddings_students[idx_student].shape[2] // 4 - 5: embeddings_students[idx_student].shape[2] // 4 + 5,
# int(embeddings_students[idx_student].shape[3] / 1.5 - 5): int(embeddings_students[idx_student].shape[3] / 1.5 + 5),
:]
embeddings_plot_candidates_shape = embeddings_plot_candidates.shape[2:]
embeddings_plot_candidates = einops.rearrange(embeddings_plot_candidates, 'b c h w d -> b (h w d) c')
embeddings_teacher = einops.rearrange(embeddings_teacher_reduced, 'b c h w d -> b (h w d) c') # [B, N, C]
embeddings_teacher_normed = F.normalize(embeddings_teacher, p=2, dim=-1)
# Save exemplary similarities of student to teachers
sim_emb_emb_student_proxy_plot = torch.einsum('b n c, b m c -> b n m', embeddings_plot_candidates, embeddings_teacher_normed)
plots[f'sim_student_teacher_s{idx_student}'] = sim_emb_emb_student_proxy_plot.reshape(n_batch, *embeddings_plot_candidates_shape, *embeddings_teacher_reduced.shape[2:]) # Atm passes non-softmaxed similarities
return losses, plots
def generate_masks_student_teacher(coord_grids_student: List[torch.Tensor],
                                   coord_grids_teacher: torch.Tensor,
                                   embedding_size: List[Tuple[int, int, int]],
                                   embedding_size_teacher: Tuple[int, int, int],
                                   fwhm: float = 256.,
                                   max_sim_dist: Tuple[float, ...] = (4., 2.),
                                   scale_z: float = 2.0,
                                   thresh: float = 0.5):
    """Spatial correspondence masks between each student view and the teacher view.

    Coordinate grids are resized to the respective feature map resolutions and compared
    pairwise. Returns three lists with one entry per student view:
    binary neighborhood masks [B, N_student, N_teacher] (Gaussian position weight >= ``thresh``),
    the index of the spatially closest teacher position per student position [B, N_student],
    and a mask of student positions whose closest teacher position lies within ``max_sim_dist[0]``.
    """
    pos_masks, closest_indices, within_reach = [], [], []
    sigma_squared = (fwhm / 2.355) ** 2  # FWHM ~= 2.355 * sigma
    grids_teacher = F.interpolate(coord_grids_teacher, size=embedding_size_teacher, mode='trilinear')  # resize to feature map size
    # [3, B, 1, N_teacher] layout for pairwise broadcasting against the student positions.
    teacher_flat = grids_teacher.permute(1, 0, 2, 3, 4).flatten(2).unsqueeze(2)
    for grids_, size_ in zip(coord_grids_student, embedding_size):
        grids_student = F.interpolate(grids_, size=size_, mode='trilinear')  # resize to feature map size
        student_flat = grids_student.permute(1, 0, 2, 3, 4).flatten(2).unsqueeze(-1)  # [3, B, N_student, 1]
        delta = student_flat - teacher_flat  # [3, B, N_student, N_teacher]
        delta[2, ...] *= scale_z  # penalize out-of-plane (z) offsets more strongly
        distances = torch.linalg.norm(delta[:3, ...], ord=2, dim=0)  # [B, N_student, N_teacher]
        pos_masks.append(torch.exp(- distances ** 2 / (2 * sigma_squared)) >= thresh)  # binarized Gaussian neighborhood
        min_dist, min_idx = torch.min(distances, dim=-1)  # closest teacher position per student position
        closest_indices.append(min_idx)
        # NOTE(review): every student view is thresholded with max_sim_dist[0] — confirm whether
        # per-view thresholds (default (4., 2.)) were intended.
        within_reach.append(min_dist <= max_sim_dist[0])
    return pos_masks, closest_indices, within_reach
def generate_masks_teacher_protos(coord_grids_teacher: torch.Tensor,
                                  coord_grids_protos: torch.Tensor,
                                  embedding_size_teacher: Tuple[int, int, int],
                                  fwhm: float = 256.,
                                  scale_z: float = 2.0):
    """Soft Gaussian position weights between teacher positions and prototype locations.

    In contrast to the student-teacher variant, the weights are returned continuously
    (no thresholding). Result shape: [B, N_teacher, N_protos].
    """
    grids_teacher = F.interpolate(coord_grids_teacher, size=embedding_size_teacher, mode='trilinear')  # resize to feature map size
    teacher_flat = grids_teacher.permute(1, 0, 2, 3, 4).flatten(2).unsqueeze(-1)  # [3, B, N_teacher, 1]
    protos_flat = coord_grids_protos.permute(2, 0, 1).unsqueeze(2)  # [3, B, 1, N_protos]; protos are already flattened
    delta = teacher_flat - protos_flat  # [3, B, N_teacher, N_protos]
    delta[2, ...] *= scale_z  # penalize out-of-plane (z) offsets more strongly
    distances = torch.linalg.norm(delta[:3, ...], ord=2, dim=0)  # [B, N_teacher, N_protos]
    sigma_squared = (fwhm / 2.355) ** 2  # FWHM ~= 2.355 * sigma
    return torch.exp(- distances ** 2 / (2 * sigma_squared))  # soft weights (not binary masks)
| Python |
3D | marcdcfischer/PUNet | src/modules/losses/focal.py | .py | 2,939 | 57 | import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Tuple, Optional
class FocalLoss(nn.Module):
    """Multi-class focal loss on logits with per-class alpha weighting.

    The focusing term ``(1 - p_t) ** gamma`` down-weights well-classified voxels.
    Per-class weights are ``alpha_background`` / ``alpha_foreground`` plus a per-class
    ``additive_alpha`` offset; when ``label_indices_active`` is given, the additive
    offsets are gathered at the active label indices of each batch element.
    """
    def __init__(self,
                 out_channels: int,
                 loss_weight: float = 1.,
                 alpha_background: float = 0.1,
                 alpha_foreground: float = 0.1,
                 additive_alpha: Tuple[float, ...] = (0.0, 0.9),
                 gamma: float = 1.5):
        super().__init__()
        self.loss_weight = loss_weight  # global scaling of the final loss value
        self.gamma = gamma  # focal focusing parameter
        self.out_channels = out_channels
        self.alpha_background = alpha_background  # base weight of class 0
        self.alpha_foreground = alpha_foreground  # base weight of all other classes
        self.additive_alpha = additive_alpha  # per-class additive weight offsets

    def forward(self,
                predictions: torch.Tensor,
                targets: torch.Tensor,
                label_indices_active: Optional[torch.Tensor] = None,
                tag: str = 'seg'):
        """
        :param predictions: logits [B, C, H, W, D]
        :param targets: int tensor [B, H, W, D]
        :param label_indices_active: [B, C] indicator of the active labels per batch element (optional)
        :param tag: key under which the loss is stored in the returned dict
        :return: dict with a single entry ``{tag: scalar loss}``
        """
        assert predictions[:, 0, ...].shape == targets.shape
        losses = dict()
        # Clamp to avoid -inf log-probabilities for (numerically) zero softmax outputs.
        log_softmax = torch.clamp(F.log_softmax(predictions, dim=1), min=-1e3)
        log_prob_weighted, log_prob_nonweighted = list(), list()
        # Build the additive offsets once; the previous implementation re-wrapped an existing
        # tensor via torch.tensor(torch.tensor(...)) per batch element, which is deprecated
        # copy-construction (emits a UserWarning) and allocated redundantly.
        additive_alpha = torch.tensor(self.additive_alpha, dtype=torch.float, device=predictions.device)
        for idx_batch in range(targets.shape[0]):
            loss_weight_alpha = torch.tensor([self.alpha_background] + [self.alpha_foreground for _ in range(predictions.shape[1] - 1)], dtype=torch.float, device=predictions.device)
            if label_indices_active is not None:
                # Gather the offsets belonging to the active label indices of this batch element.
                loss_weight_alpha += additive_alpha[torch.nonzero(label_indices_active[idx_batch, :], as_tuple=False).squeeze()]
            else:
                loss_weight_alpha += additive_alpha
            assert predictions.shape[1] == loss_weight_alpha.shape[0]
            log_prob_weighted.append(F.nll_loss(input=log_softmax[idx_batch: idx_batch+1, ...],
                                                target=targets[idx_batch: idx_batch+1, ...],
                                                weight=loss_weight_alpha,
                                                reduction='none')[0, ...])
            log_prob_nonweighted.append(F.nll_loss(input=log_softmax[idx_batch: idx_batch+1, ...],
                                                   target=targets[idx_batch: idx_batch+1, ...],
                                                   weight=None,
                                                   reduction='none')[0, ...])
        log_prob_weighted, log_prob_nonweighted = torch.stack(log_prob_weighted, dim=0), torch.stack(log_prob_nonweighted, dim=0)
        prob = torch.exp(-log_prob_nonweighted)  # p_t of the target class
        losses[tag] = self.loss_weight * (torch.clamp(1. - prob, min=0.0) ** self.gamma * log_prob_weighted).mean()
        return losses
| Python |
3D | marcdcfischer/PUNet | src/modules/architectures/momentum_model_simple.py | .py | 3,767 | 83 | import torch
import torch.nn as nn
from argparse import ArgumentParser, Namespace
from typing import Union, Dict, Optional, Tuple, List
from src.modules.architectures.baseline_unet import MonaiUNet
from src.modules.architectures.baseline_unetr import MonaiUNETR
from src.modules.architectures.baseline_swin_unetr import MonaiSwinUNETR
from src.modules.architectures.swin_unetr_deep import DeepSIAUNetr
class MomentumModelSimple(nn.Module):
def __init__(self,
conf: Union[Dict, Namespace]):
super().__init__()
self.conf = conf
self.current_tau_instructions = conf.initial_tau_instructions
self.current_tau_body = conf.initial_tau_body
# Architecture
if self.conf.architecture.lower() == 'wip_simple':
self.architecture = DeepSIAUNetr
elif self.conf.architecture.lower() == 'unet':
self.architecture = MonaiUNet
elif self.conf.architecture.lower() == 'unetr':
self.architecture = MonaiUNETR
elif self.conf.architecture.lower() == 'swin_unetr':
self.architecture = MonaiSwinUNETR
else:
raise NotImplementedError(f'The selected architecture {self.conf.architecture} is not available.')
# Base architecture (x2). Anything in there will be replicated 2x.
self.network_student = self.architecture(conf=self.conf)
self.network_teacher = self.architecture(conf=self.conf)
# Overwrite teacher initialization with teachers one and disable gradients
for (name, param_student_), (_, param_teacher_) in zip(
self.network_student.named_parameters(),
self.network_teacher.named_parameters(),
):
param_teacher_.data.copy_(param_student_.data) # initialize teacher with identical data as student
param_teacher_.requires_grad = False # Do not update by gradient
def forward(self,
x: List[torch.Tensor],
x_teacher: Optional[torch.Tensor] = None):
dict_out_students = [self.network_student(x_) for x_ in x]
dict_out_teacher = None
if x_teacher is not None:
dict_out_teacher = self.network_teacher(x_teacher)
return dict_out_students, dict_out_teacher
def update_teacher(self):
# Apply momentum weight update
# Note: batch norms are in general left untouched (i.e. are updated separately)
for (name, param_student_), (_, param_teacher_) in zip(
self.network_student.named_parameters(),
self.network_teacher.named_parameters(),
):
param_teacher_.data = self.current_tau_body * param_teacher_.data + (1 - self.current_tau_body) * param_student_.data
@staticmethod
def add_model_specific_args(parent_parser):
parser = ArgumentParser(parents=[parent_parser], add_help=False)
hparams_tmp = parser.parse_known_args()[0]
if hparams_tmp.architecture.lower() == 'wip_simple':
parser = DeepSIAUNetr.add_model_specific_args(parser)
elif hparams_tmp.architecture.lower() == 'unet':
parser = MonaiUNet.add_model_specific_args(parser)
elif hparams_tmp.architecture.lower() == 'unetr':
parser = MonaiUNETR.add_model_specific_args(parser)
elif hparams_tmp.architecture.lower() == 'swin_unetr':
parser = MonaiSwinUNETR.add_model_specific_args(parser)
else:
raise NotImplementedError(f'The selected architecture {hparams_tmp.architecture} is not available.')
# Momentum
parser.add_argument('--initial_tau_instructions', default=0.99, type=float)
parser.add_argument('--initial_tau_body', default=0.99, type=float)
return parser
| Python |
3D | marcdcfischer/PUNet | src/modules/architectures/swin_unetr_deep.py | .py | 28,913 | 456 | import einops
import torch
import torch.nn as nn
import torch.nn.functional as F
from argparse import ArgumentParser, Namespace
from typing import Union, Dict, Optional, Tuple, List
from monai.networks.blocks import Convolution, ResidualUnit, UnetResBlock, UnetUpBlock
from src.modules.blocks.sia_res_block_deep import DeepSIAResBlock
from src.modules.blocks.sia_up_block_deep import DeepSIAUpBlock
from src.modules.blocks.instruction_pool import InstructionPool
from src.modules.blocks.similarity_aggregation import similarity_aggregation
import collections
from einops import rearrange
import math
from monai.networks.layers.utils import get_act_layer, get_norm_layer
import warnings
from itertools import chain
class DeepSIAUNetr(nn.Module):
    def __init__(self,
                 conf: Union[Dict, Namespace]):
        """Build the prompted UNet-style segmentation backbone from the configuration.

        The network consists of an (optional) small CNN encoder stem with a single in-plane
        downsampling, a windowed-attention (SIA) UNet encoder/decoder with optional per-block
        instruction pools for prompting, and either a final instruction pool or a fixed
        convolutional output head (ablation).
        """
        super().__init__()
        self.conf = conf
        self.bias_vit = self.conf.bias_vit
        # Segmentation instructions (atm one token per class)
        assert self.conf.instruction_pool_size >= self.conf.out_channels
        # NOTE(review): label_indices_max_active + 1 < 2 restricts separate background tokens to the
        # single-active-label setting — confirm intended.
        self.separate_background = self.conf.separate_background and self.conf.label_indices_max_active + 1 < 2
        if self.separate_background:
            print('Using separate background tokens for each foreground class.')
        else:
            print('Using shared background tokens for all foreground classes.')
        self.tokens_per_instruction_combined = 2 * self.conf.tokens_per_instruction if self.separate_background else self.conf.tokens_per_instruction
        # Make sure variants are configured (somewhat) properly
        assert self.conf.instruction_channels == self.conf.hidden_channels[0]
        if 'prompting' in self.conf.adaptation_variant.lower():
            assert self.conf.fixed_output is False
        else:
            assert self.conf.fixed_output is True
        # Make sure dimensions are divisible at least by 2 or are equal to 1
        for patch_size_ in ([self.conf.patch_size_teacher, *self.conf.patch_size_students]):
            assert all([x_ % 2 == 0 or x_ == 1 for x_ in patch_size_])
        # Architecture
        # CNN Encoder (with only one downsampling)
        self.depth_kernel_size_cnn = [3 if self.conf.patch_size_students[0][2] >= 3. else 1 for _ in range(self.conf.depth_cnn_encoder)]
        # self.depth_stride_cnn = [2 if self.conf.patch_size_students[0][2] > 1. else 1 for _ in range(self.conf.depth_cnn_encoder)]
        self.encoding_res_blocks = nn.ModuleList()
        for idx_block in range(self.conf.depth_cnn_encoder):
            self.encoding_res_blocks.append(UnetResBlock(
                spatial_dims=3,
                in_channels=self.conf.hidden_channels[0] if idx_block > 0 else self.conf.in_channels,
                out_channels=self.conf.hidden_channels[0],
                kernel_size=(3, 3, self.depth_kernel_size_cnn[idx_block]),
                stride=(1, 1, 1) if idx_block + 1 < self.conf.depth_cnn_encoder else (2, 2, 1),  # Downsampling only for last CNN layer and only intra-plane
                norm_name="batch",
                dropout=0.0,
            ))
        # (Optional) ViT position encoding
        self.max_patches = [math.ceil(x_ / 2) for x_ in self.conf.patch_size_teacher]  # Teacher is expected to have the highest size.
        if self.bias_vit:
            self.vit_pos_bias_content = nn.Parameter(nn.init.xavier_uniform_(torch.empty((1, math.prod(self.max_patches), self.conf.hidden_channels[0])),
                                                                             gain=nn.init.calculate_gain('linear')), requires_grad=True)  # monai used truncated normal
            self.vit_pos_bias_instructions = nn.Parameter(nn.init.xavier_uniform_(torch.empty((1, 1, self.conf.hidden_channels[0])),
                                                                                  gain=nn.init.calculate_gain('linear')), requires_grad=True)
        # UNet Encoder (with SIA blocks)
        # Depth (z) kernel/stride shrink to 1 once the halved z-extent would become degenerate.
        self.depth_kernel_size_unet = [3 if self.conf.patch_size_students[0][2] / 2 ** idx_block >= 3. else 1 for idx_block in range(self.conf.depth_sia_encoder)]
        self.depth_stride_unet = [1] + [2 if self.conf.patch_size_students[0][2] / 2 ** idx_block > 1. else 1 for idx_block in range(1, self.conf.depth_sia_encoder)]
        # self.depth_scale_factor_unet = [2. if self.conf.patch_size_students[0][2] / 2 ** idx_block > 1. else 1 for idx_block in range(self.conf.depth_sia_encoder)]
        self.sia_res_blocks = nn.ModuleList()
        self.sia_res_instruction_blocks_v2 = nn.ModuleList()
        for idx_block in range(self.conf.depth_sia_encoder):
            print(f'Encoder - Using kernel size {(3, 3, self.depth_kernel_size_unet[idx_block])}, strides {(2, 2, self.depth_stride_unet[idx_block]) if idx_block > 0 else (1, 1, 1)}.')
            # NOTE(review): SIA blocks receive conf.separate_background while the final instruction pool
            # below uses the derived self.separate_background — confirm this asymmetry is intended.
            self.sia_res_blocks.append(DeepSIAResBlock(
                channels_in=self.conf.hidden_channels[idx_block - 1] if idx_block > 0 else self.conf.hidden_channels[idx_block] if self.conf.depth_cnn_encoder > 0 else self.conf.in_channels,
                channels_out=self.conf.hidden_channels[idx_block],
                instruction_pool_size=self.conf.instruction_pool_size,  # kind of an upperlimit.
                tokens_per_instruction=self.tokens_per_instruction_combined,
                separate_background=self.conf.separate_background,
                kernel_size=(3, 3, self.depth_kernel_size_unet[idx_block]),
                strides=(2, 2, self.depth_stride_unet[idx_block]) if idx_block > 0 else (1, 1, 1),
                heads=self.conf.attention_heads,
                window_size=self.conf.attn_window_size,
                unique_instruction_bias=self.conf.unique_instruction_bias,
                unique_token_bias=self.conf.unique_token_bias,
                no_bias_instructions=self.conf.no_bias_instructions,
                no_bias_content=self.conf.no_bias_content,
                adapter=self.conf.adaptation_variant.lower() == 'adapter',
            ))  # TODO: Make same check as e.g. for depth_kernel_size for passed window_size entries
            if ((idx_block == 0 and self.conf.prompting_variant.lower() in ['start', 'encoder', 'full'])\
                    or (idx_block > 0 and self.conf.prompting_variant.lower() in ['encoder', 'full']))\
                    and not self.conf.fixed_output:
                for idx_sub_block_ in range(2):  # w and sw
                    self.sia_res_instruction_blocks_v2.append(InstructionPool(
                        instruction_pool_size=self.conf.instruction_pool_size,
                        hidden_channels=self.conf.hidden_channels[idx_block],
                        default_instructions=self.conf.out_channels,
                        tokens_per_instruction=self.tokens_per_instruction_combined,
                        separate_background=self.conf.separate_background,
                        use_norm=self.conf.instructions_use_norm,
                        elementwise_affine=self.conf.instructions_elementwise_affine
                    ))
            else:
                # Placeholder modules keep list indices aligned with the SIA blocks.
                self.sia_res_instruction_blocks_v2.extend([nn.Module(), nn.Module()])
        # UNet Decoder (with SIA blocks)
        self.sia_up_blocks = nn.ModuleList()
        self.sia_up_instruction_blocks_v2 = nn.ModuleList()
        for idx_block in range(self.conf.depth_sia_decoder):
            print(f'Decoder - Using kernel size {(3, 3, list(reversed(self.depth_kernel_size_unet))[idx_block])}, strides {(2, 2, list(reversed(self.depth_stride_unet))[idx_block])}.')
            self.sia_up_blocks.append(DeepSIAUpBlock(
                channels_in=list(reversed(self.conf.hidden_channels[:len(self.sia_res_blocks)]))[idx_block],
                channels_out=list(reversed(self.conf.hidden_channels[:len(self.sia_res_blocks)]))[idx_block + 1],
                instruction_pool_size=self.conf.instruction_pool_size,
                tokens_per_instruction=self.tokens_per_instruction_combined,
                separate_background=self.conf.separate_background,
                kernel_size=(3, 3, list(reversed(self.depth_kernel_size_unet))[idx_block]),
                strides=(2, 2, list(reversed(self.depth_stride_unet))[idx_block]),
                heads=self.conf.attention_heads,
                window_size=self.conf.attn_window_size,
                unique_instruction_bias=self.conf.unique_instruction_bias,
                unique_token_bias=self.conf.unique_token_bias,
                no_bias_instructions=self.conf.no_bias_instructions,
                no_bias_content=self.conf.no_bias_content,
                adapter=self.conf.adaptation_variant.lower() == 'adapter',
            ))
            if self.conf.prompting_variant.lower() in ['decoder', 'full'] and not self.conf.fixed_output:
                for idx_sub_block_ in range(2):  # w and sw
                    self.sia_up_instruction_blocks_v2.append(InstructionPool(
                        instruction_pool_size=self.conf.instruction_pool_size,
                        hidden_channels=list(reversed(self.conf.hidden_channels[:len(self.sia_res_blocks)]))[idx_block + 1],
                        default_instructions=self.conf.out_channels,
                        tokens_per_instruction=self.tokens_per_instruction_combined,
                        separate_background=self.conf.separate_background,
                        use_norm=self.conf.instructions_use_norm,
                        elementwise_affine=self.conf.instructions_elementwise_affine
                    ))
            else:
                self.sia_up_instruction_blocks_v2.extend([nn.Module(), nn.Module()])
        # Final upsampling
        if self.conf.depth_cnn_encoder > 0:
            self.up = nn.Upsample(scale_factor=(2, 2, 1), mode='trilinear', align_corners=False)
        # Last pool is always active (otherwise no sim comparison is possible)
        # I.e. this pool exists regardless of start, end, encoder, decoder, full variants
        if not self.conf.fixed_output:
            self.instruction_pool = InstructionPool(instruction_pool_size=self.conf.instruction_pool_size,
                                                    hidden_channels=self.conf.instruction_channels,
                                                    default_instructions=self.conf.out_channels,
                                                    tokens_per_instruction=self.tokens_per_instruction_combined,
                                                    separate_background=self.separate_background,
                                                    use_norm=self.conf.instructions_use_norm,
                                                    elementwise_affine=self.conf.instructions_elementwise_affine)
        # Fixed output (ablation)
        else:
            self.norm_fixed = get_norm_layer(name='batch',
                                             spatial_dims=3,
                                             channels=self.conf.hidden_channels[0])
            self.conv_fixed = nn.Conv3d(in_channels=self.conf.hidden_channels[0],
                                        out_channels=self.conf.label_indices_max_active + 1 if self.conf.label_indices_max_active > 0 else self.conf.out_channels,
                                        kernel_size=(1, 1, 1))
        # Mean rep initialization
        if self.conf.mean_initialization and self.conf.downstream:
            self.set_downstream_instruction_parameters(label_indices_base=self.conf.label_indices_base,
                                                       label_indices_downstream=self.conf.label_indices_downstream_active)
def get_named_cnn_encoder_parameters(self):
params_ = list(self.encoding_res_blocks.named_parameters())
return params_
def get_named_fixed_parameters(self):
params_ = []
if self.conf.fixed_output:
params_ = list(chain(*[self.norm_fixed.named_parameters(), self.conv_fixed.named_parameters()]))
return params_
def get_named_encoder_parameters(self):
params_ = list(chain(*[x_.named_parameters_body() for x_ in self.sia_res_blocks]))
return params_
def get_named_decoder_parameters(self):
params_ = list(chain(*[x_.named_parameters_body() for x_ in self.sia_up_blocks]))
return params_
def get_named_body_parameters(self):
params_ = list(chain(*[self.get_named_encoder_parameters(), self.get_named_decoder_parameters(), self.get_named_cnn_encoder_parameters(), self.get_named_fixed_parameters()]))
return params_
def get_named_instruction_bias_parameters(self):
sia_res_blocks_instruction_bias_params = list(chain(*[x_.named_parameters_bias_instructions() for x_ in self.sia_res_blocks]))
sia_up_blocks_instruction_bias_params = list(chain(*[x_.named_parameters_bias_instructions() for x_ in self.sia_up_blocks]))
params_ = [*sia_res_blocks_instruction_bias_params, *sia_up_blocks_instruction_bias_params]
return params_
def get_named_instruction_pool_parameters(self):
params_ = list(chain(*[self.sia_res_instruction_blocks_v2.named_parameters(), self.sia_up_instruction_blocks_v2.named_parameters()]))
if not self.conf.fixed_output:
params_ += list(self.instruction_pool.named_parameters())
return params_
def get_named_instruction_parameters(self):
params_ = list(chain(*[self.get_named_instruction_pool_parameters(), self.get_named_instruction_bias_parameters()]))
return params_
def get_named_adapter_parameters(self):
params_ = list(chain(*[x_.named_parameters_adapter() for x_ in self.sia_res_blocks]))
params_ += list(chain(*[x_.named_parameters_adapter() for x_ in self.sia_up_blocks]))
return params_
    def set_requires_gradient(self,
                              grad_instructions: torch.BoolTensor,
                              grad_instructions_norm: bool = True,
                              grad_instructions_scores: bool = True,
                              grad_body: bool = True) -> None:
        """Freeze / unfreeze parameter groups according to ``conf.adaptation_variant``.

        All parameters are first frozen, then selectively re-enabled:
        the body (per ``grad_body``), the fixed output head / adapters / decoder
        (depending on the variant), bias+norm parameters (for bias variants),
        and per-instruction tokens and bias scores (for prompting variants).

        Args:
            grad_instructions: per-instruction mask; instruction ``i`` is trainable
                iff ``grad_instructions[i]`` is True. Instructions beyond its length
                stay frozen.
            grad_instructions_norm: whether the shared instruction norm is trainable.
            grad_instructions_scores: whether instruction bias scores are trainable.
            grad_body: whether the network body is trainable.
        """
        # Disable everything (as default)
        debug = True
        for (name_, param_student_) in self.named_parameters():
            param_student_.requires_grad = False
            if debug:
                # Make sure every parameter is included in any of the submodules
                # (sanity check that the named-parameter getters cover the full model).
                if not any([param_student_.data_ptr() == x_[1].data_ptr() for x_ in [*self.get_named_body_parameters(), *self.get_named_instruction_parameters(), *self.get_named_adapter_parameters()]]):
                    raise ValueError(f'Missing parameter {name_}')
        # Enable or disable gradients for bulk of interpreter (student) - dependent on selective freezing
        for (name_, param_student_) in self.get_named_body_parameters():
            param_student_.requires_grad = grad_body
        # Last layer
        if self.conf.adaptation_variant in ['fixed', 'decoder', 'bias', 'adapter']:
            for (name_, param_student_) in self.get_named_fixed_parameters():
                param_student_.requires_grad = True
        # Adapter layers
        if self.conf.adaptation_variant in ['adapter']:
            for (name_, param_student_) in self.get_named_adapter_parameters():
                param_student_.requires_grad = True
        # Decoder
        if self.conf.adaptation_variant in ['decoder']:
            for (name_, param_student_) in self.get_named_decoder_parameters():
                param_student_.requires_grad = True
        # Enable or disable gradients for bias params in bulk of interpreter
        if self.conf.adaptation_variant in ['bias', 'bias_prompting']:
            for (name_, param_student_) in self.get_named_body_parameters():
                if 'bias' in name_ or 'norm' in name_:  # Includes bias + scale parameters of norms
                    param_student_.requires_grad = True
        # Enable or disable gradients for all instruction parameters
        if self.conf.adaptation_variant in ['prompting', 'bias_prompting']:
            # Only active instruction bias scores are adjusted.
            # NOTE(review): name_[-1] parses the instruction index from the last character of
            # the parameter name — assumes fewer than 11 instruction slots; verify if the pool grows.
            for (name_, param_student_) in self.get_named_instruction_bias_parameters():
                if 'encoding_cross_inst_content' in name_:
                    if int(name_[-1]) < grad_instructions.shape[0]:
                        param_student_.requires_grad = grad_instructions[int(name_[-1])].item() & grad_instructions_scores
                    else:
                        param_student_.requires_grad = False  # Excess instruction remain unused and therefore default False
                else:
                    param_student_.requires_grad = grad_instructions_scores
            # Fine-grained instruction pool adjustments
            for (name_, param_student_) in self.get_named_instruction_pool_parameters():
                # Set token parameter
                if 'instructions_norm' in name_:  # norm weights and bias
                    param_student_.requires_grad = grad_instructions_norm
                else:  # instructions.0+
                    if int(name_[-1]) < grad_instructions.shape[0]:
                        param_student_.requires_grad = grad_instructions[int(name_[-1])].item()
                    else:
                        param_student_.requires_grad = False  # Excess instruction remain unused and therefore default False
        # Report frozen / nonfrozen
        def _report_trainable(key_, named_params):
            # Prints "trainable / total" parameter counts for one named group.
            print(f"Trainable are {sum([p_[1].numel() for p_ in named_params if p_[1].requires_grad])}/{sum([p_[1].numel() for p_ in named_params])} {key_} parameters.")
        print(f"Trainable parameters for adaptation variant {self.conf.adaptation_variant}.")
        _report_trainable('cnn encoder', self.get_named_cnn_encoder_parameters())
        _report_trainable('encoder', self.get_named_encoder_parameters())
        _report_trainable('decoder', self.get_named_decoder_parameters())
        _report_trainable('fixed layer', self.get_named_fixed_parameters())
        _report_trainable('body', self.get_named_body_parameters())
        _report_trainable('instruction bias', self.get_named_instruction_bias_parameters())  # Note: amount of truly active bias parameters may be less (for position ablations).
        _report_trainable('instruction pool', self.get_named_instruction_pool_parameters())
        _report_trainable('instruction', self.get_named_instruction_parameters())
        _report_trainable('adapter', self.get_named_adapter_parameters())
        _report_trainable('all', list(self.named_parameters()))
    def set_downstream_instruction_parameters(self, label_indices_base: List[int], label_indices_downstream: List[int]) -> None:
        """Initialize downstream instruction tokens from the mean of the base-category tokens.

        Each instruction listed in ``label_indices_downstream`` is overwritten in place
        with the mean token representation of the ``label_indices_base`` instructions.
        If no base labels are given, tokens keep their (random) initialization.
        """
        if len(label_indices_base) > 0:
            print('Performing initialization of instructions based on mean representation.')
            mean_rep = torch.mean(torch.stack(list(self.instruction_pool.instruction_tokens.instructions), dim=0)[label_indices_base, ...], dim=0)  # Mean rep of existing foreground categories
            for idx_instruction, tokens_ in enumerate(self.instruction_pool.instruction_tokens.instructions):
                if idx_instruction in label_indices_downstream:
                    if idx_instruction in label_indices_base:
                        warnings.warn(f'Initializing downstream instruction {idx_instruction} with mean rep despite it being present in label_indices_base. (Ignore if intended.)')
                    # In-place overwrite keeps the parameter object (and any optimizer references) intact.
                    tokens_.data.copy_(mean_rep)
        else:
            print('Initialization of instructions remains random since no base labels are available.')
    def forward(self,
                x: torch.Tensor,
                label_indices: Optional[torch.Tensor] = None,
                pseudo_indices_subject: Optional[torch.Tensor] = None,
                pseudo_indices_label: Optional[torch.Tensor] = None,
                mode_label: str = 'pseudo',
                mode_loss: str = 'both'):  # pseudo or label
        """Run CNN encoding, instructed transformer encoder/decoder and the segmentation head.

        Returns a nested dict with (at least):
        - ``dict_out['patched']['embedded_latents']``: decoder output features.
        - ``dict_out['dense']['embedded_latents']``: segmentation logits / similarities,
          upsampled when a CNN encoder (downsampling) stage is present.
        - ``dict_out['instructions']['segmentation_latents']``: final instruction tokens
          (None when ``conf.fixed_output``).

        NOTE(review): assumes x is a 5D tensor (b, c, h, w, d) — consistent with the
        rearrange patterns below; confirm against callers.
        """
        dict_out = collections.defaultdict(dict)
        # Fetch instructions
        # Note: mode does not have an effect atm. Pseudo does not exist for deep (since it would be too large)
        # CNN Encoding (with only one downsampling step)
        for idx_block in range(self.conf.depth_cnn_encoder):
            x = self.encoding_res_blocks[idx_block](x)
        if self.bias_vit:
            # Add a (center-cropped) learned positional bias to the content tokens.
            vit_pos_bias_ = rearrange(self.vit_pos_bias_content, 'b (h w d) c -> b c h w d', h=self.max_patches[0], w=self.max_patches[1], d=self.max_patches[2])
            x = x + vit_pos_bias_[...,
                                  math.floor((vit_pos_bias_.shape[2] - x.shape[2]) / 2): vit_pos_bias_.shape[2] - math.ceil((vit_pos_bias_.shape[2] - x.shape[2]) / 2),
                                  math.floor((vit_pos_bias_.shape[3] - x.shape[3]) / 2): vit_pos_bias_.shape[3] - math.ceil((vit_pos_bias_.shape[3] - x.shape[3]) / 2),
                                  math.floor((vit_pos_bias_.shape[4] - x.shape[4]) / 2): vit_pos_bias_.shape[4] - math.ceil((vit_pos_bias_.shape[4] - x.shape[4]) / 2)]
            # x_instructions = x_instructions + self.vit_pos_bias_instructions
        # Transformer processing
        # Encoder
        dict_out['dense']['skips'] = list()
        dict_out['instructions']['skips'] = list()
        for idx_block in range(self.conf.depth_sia_encoder):
            if mode_loss == 'self'\
                    or (self.conf.noninstructed_attention and not self.conf.downstream)\
                    or (self.conf.noninstructed_attention_downstream and self.conf.downstream)\
                    or not isinstance(self.sia_res_instruction_blocks_v2[2 * idx_block], InstructionPool):  # no instructions for mode_loss == 'self' (only)
                x = self.sia_res_blocks[idx_block](x=x,
                                                   x_instructions=None,
                                                   label_indices=None)
            else:
                # Two instruction pools per block (one per attention layer).
                x_instructions = [self.sia_res_instruction_blocks_v2[2 * idx_block](label_indices, batch_size=x.shape[0]),
                                  self.sia_res_instruction_blocks_v2[2 * idx_block + 1](label_indices, batch_size=x.shape[0])]
                x = self.sia_res_blocks[idx_block](x=x,
                                                   x_instructions=x_instructions,
                                                   label_indices=label_indices)
            if idx_block + 1 < self.conf.depth_sia_encoder:
                dict_out['dense']['skips'].append(x)
        # Decoder
        for idx_block in range(len(self.sia_up_blocks)):
            if mode_loss == 'self'\
                    or (self.conf.noninstructed_attention and not self.conf.downstream)\
                    or (self.conf.noninstructed_attention_downstream and self.conf.downstream)\
                    or not isinstance(self.sia_up_instruction_blocks_v2[2 * idx_block], InstructionPool):
                x = self.sia_up_blocks[idx_block](x=x,
                                                  x_skips=list(reversed(dict_out['dense']['skips']))[idx_block],
                                                  x_instructions=None,
                                                  label_indices=None)
            else:
                x_instructions = [self.sia_up_instruction_blocks_v2[2 * idx_block](label_indices, batch_size=x.shape[0]),
                                  self.sia_up_instruction_blocks_v2[2 * idx_block + 1](label_indices, batch_size=x.shape[0])]
                x = self.sia_up_blocks[idx_block](x=x,
                                                  x_skips=list(reversed(dict_out['dense']['skips']))[idx_block],
                                                  x_instructions=x_instructions,
                                                  label_indices=label_indices)
        dict_out['patched']['embedded_latents'] = x
        # Segmentation recombination
        if not self.conf.fixed_output:
            # Cosine-similarity matching of decoder latents against the final instruction tokens.
            x_instructions_final = self.instruction_pool(label_indices, batch_size=x.shape[0])
            dict_out['instructions']['segmentation_latents'] = x_instructions_final
            h_, w_, d_ = dict_out['patched']['embedded_latents'].shape[-3:]
            x_sim_latents = einops.rearrange(dict_out['patched']['embedded_latents'], 'b c h w d -> b (h w d) c')
            x_sim_instructions = einops.rearrange(dict_out['instructions']['segmentation_latents'], 'b (i n) c -> b i n c', n=self.conf.tokens_per_instruction)  # [B, I, N, C]. Should add up to the same form regardless of self.separate_background (for binary case).
            x_sim = similarity_aggregation(latents=x_sim_latents,
                                           instructions=x_sim_instructions,
                                           mean_aggregation=self.conf.mean_aggregation,
                                           top_k_selection=self.conf.top_k_selection,
                                           soft_selection_sigma=self.conf.soft_selection_sigma)
            x_sim = einops.rearrange(x_sim, 'b i (h w d) -> b i h w d', h=h_, w=w_, d=d_)
        else:
            dict_out['instructions']['segmentation_latents'] = None
            # assert self.conf.architecture == 'wip_simple' # Should only be used in conjunction with simple (multiclass) case.
            x_sim = self.conv_fixed(F.leaky_relu(self.norm_fixed(dict_out['patched']['embedded_latents']), inplace=True))
        # Upsample back to input resolution when the CNN encoder downsampled.
        dict_out['dense']['embedded_latents'] = self.up(x_sim) if self.conf.depth_cnn_encoder > 0 else x_sim
        return dict_out
@staticmethod
def add_model_specific_args(parent_parser):
parser = ArgumentParser(parents=[parent_parser], add_help=False)
parser.add_argument('--depth_cnn_encoder', default=1, type=int)
parser.add_argument('--depth_sia_encoder', default=5, type=int)
parser.add_argument('--depth_sia_decoder', default=4, type=int)
parser.add_argument('--instruction_pool_size', default=10, type=int)
parser.add_argument('--instruction_pool_size_pseudo_subjects', default=100, type=int)
parser.add_argument('--instruction_pool_size_pseudo_labels', default=51, type=int)
parser.add_argument('--instruction_channels', default=32, type=int)
parser.add_argument('--tokens_per_instruction', default=16, type=int)
# parser.add_argument('--tokens_per_background', default=20, type=int)
parser.add_argument('--attention_heads', default=8, type=int)
parser.add_argument('--attn_window_size', default=[8, 8, 1], nargs=3, type=int)
parser.add_argument('--hidden_channels', default=[32, 64, 128, 256, 384], nargs='*', type=int)
# Instruction initialization / aggregation
parser.add_argument('--noninstructed_attention', action='store_true') # Attention layers are not instructed
parser.add_argument('--noninstructed_attention_downstream', action='store_true')
parser.add_argument('--top_k_selection', action='store_true') # True: Aggregate via softmax re-weighting, False: Mean over topk (atm 3)
parser.add_argument('--soft_selection_sigma', default=0.1, type=float) # Temperature for softmax re-weighting
parser.add_argument('--mean_aggregation', action='store_true') # Aggregate instructions without any sophisticated selection
parser.add_argument('--mean_initialization', default=False, type=bool) # Use mean representation of learned (base) categories for initialization of new (unseen) downstream category
parser.add_argument('--fixed_output', action='store_true') # Fixed linear output layer instead of cosine similarity matching with instructions.
parser.add_argument('--instructions_use_norm', default=False, type=bool) # Norm all instructions in a pool by a common norm.
parser.add_argument('--instructions_elementwise_affine', default=True, type=bool) # Enable / disable learning of extra norm params for instructions.
parser.add_argument('--prompting_variant', default='full', type=str, choices=['start', 'end', 'encoder', 'decoder', 'full'])
parser.add_argument('--adaptation_variant', default='prompting', type=str, choices=['prompting', 'fixed', 'decoder', 'bias', 'adapter', 'bias_prompting'])
# Attention bias scheme
parser.add_argument('--unique_instruction_bias', default=True, type=bool) # If True each Instruction has a unique bias score.
parser.add_argument('--unique_token_bias', default=True, type=bool) # If True each Token (across all instructions) has a unique bias score. IF False all bias scores are the same regardless of the token.
parser.add_argument('--no_bias_instructions', action='store_true')
parser.add_argument('--no_bias_content', action='store_true')
parser.add_argument('--bias_vit', action='store_true')
return parser
| Python |
3D | marcdcfischer/PUNet | src/modules/architectures/__init__.py | .py | 0 | 0 | null | Python |
3D | marcdcfischer/PUNet | src/modules/architectures/momentum_model.py | .py | 4,150 | 84 | import torch
import torch.nn as nn
from argparse import ArgumentParser, Namespace
from typing import Union, Dict, Optional, Tuple, List
# from src.modules.architectures.interpreter import Interpreter as Architecture
from src.modules.architectures.swin_unetr_deep import DeepSIAUNetr
import warnings
class MomentumModel(nn.Module):
    """Student/teacher pair of the deep SIA architecture.

    Two identical networks are instantiated; the teacher starts as a copy of
    the student, receives no gradients, and tracks the student through an
    exponential moving average (momentum) update.
    """

    def __init__(self,
                 conf: Union[Dict, Namespace]):
        super().__init__()
        self.conf = conf
        # Momentum factors; may be adjusted externally over training.
        self.current_tau_instructions = conf.initial_tau_instructions
        self.current_tau_body = conf.initial_tau_body

        # Resolve the backbone class from the config.
        variant = self.conf.architecture_wip.lower()
        if variant == 'shallow':
            raise ValueError('Model has been removed, since its implementation was outdated. ')
        elif variant == 'deep':
            self.architecture = DeepSIAUNetr
        else:
            raise NotImplementedError(f'The selected architecture {self.conf.architecture} is not available.')

        # Twin networks: the student is trained, the teacher tracks it.
        self.network_student = self.architecture(conf=self.conf)
        self.network_teacher = self.architecture(conf=self.conf)

        # Start the teacher from the student's weights and detach it from autograd.
        for (_, p_student), (_, p_teacher) in zip(
                self.network_student.named_parameters(),
                self.network_teacher.named_parameters(),
        ):
            p_teacher.data.copy_(p_student.data)
            p_teacher.requires_grad = False

    def forward(self,
                x: List[torch.Tensor],
                x_teacher: Optional[torch.Tensor] = None,
                label_indices: Optional[torch.Tensor] = None,
                pseudo_indices_subject: Optional[torch.Tensor] = None,
                pseudo_indices_label: Optional[torch.Tensor] = None,
                mode_label: str = 'label',
                mode_loss: str = 'both'):
        """Run the student on every element of ``x``; run the teacher on ``x_teacher`` if given."""
        shared_kwargs = dict(label_indices=label_indices,
                             pseudo_indices_subject=pseudo_indices_subject,
                             pseudo_indices_label=pseudo_indices_label,
                             mode_label=mode_label,
                             mode_loss=mode_loss)
        dict_out_students = [self.network_student(x_, **shared_kwargs) for x_ in x]
        dict_out_teacher = self.network_teacher(x_teacher, **shared_kwargs) if x_teacher is not None else None
        return dict_out_students, dict_out_teacher

    def update_teacher(self):
        """Momentum (EMA) update of the teacher weights from the student.

        Note: batch norms are in general left untouched (i.e. are updated separately).
        Instruction-pool parameters use their own momentum factor.
        """
        for (name, p_student), (_, p_teacher) in zip(
                self.network_student.named_parameters(),
                self.network_teacher.named_parameters(),
        ):
            tau = self.current_tau_instructions if 'instruction_pool' in name else self.current_tau_body
            p_teacher.data = tau * p_teacher.data + (1 - tau) * p_student.data

    @staticmethod
    def add_model_specific_args(parent_parser):
        """Append backbone-specific and momentum arguments to the parser."""
        parser = ArgumentParser(parents=[parent_parser], add_help=False)
        # The backbone's own arguments depend on the already-parsable variant flag.
        hparams_tmp = parser.parse_known_args()[0]
        variant = hparams_tmp.architecture_wip.lower()
        if variant == 'shallow':
            raise ValueError('Model has been removed, since its implementation was outdated. ')
        elif variant == 'deep':
            parser = DeepSIAUNetr.add_model_specific_args(parser)
        else:
            raise NotImplementedError(f'The selected architecture {hparams_tmp.architecture_wip} is not available.')
        # Momentum
        parser.add_argument('--initial_tau_instructions', default=0.99, type=float)
        parser.add_argument('--initial_tau_body', default=0.99, type=float)
        return parser
| Python |
3D | marcdcfischer/PUNet | src/modules/architectures/baseline_unet.py | .py | 2,029 | 57 | import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Union, Dict
from argparse import Namespace, ArgumentParser
from monai.networks.nets import UNet
from monai.networks.layers.utils import get_act_layer, get_norm_layer
# Monai UNet model
class MonaiUNet(nn.Module):
    """2D MONAI UNet backbone with separate segmentation and embedding heads."""

    def __init__(self,
                 conf: Union[Dict, Namespace]):
        super().__init__()
        self.conf = conf

        # Backbone: 2D residual UNet producing 32 feature channels.
        self.net = UNet(spatial_dims=2,
                        in_channels=self.conf.in_channels,
                        out_channels=32,
                        channels=(32, 64, 128, 256, 384),
                        strides=(1, 2, 2, 2),
                        kernel_size=3,
                        up_kernel_size=3,
                        num_res_units=2,
                        act='PRELU',
                        norm='BATCH',
                        dropout=0.0,
                        bias=True)
        # Segmentation head (class logits).
        self.norm_seg = get_norm_layer(name='batch', spatial_dims=2, channels=32)
        self.conv_seg = nn.Conv2d(in_channels=32,
                                  out_channels=self.conf.out_channels,
                                  kernel_size=(1, 1))
        # Embedding head (32-channel features).
        self.norm_emb = get_norm_layer(name='batch', spatial_dims=2, channels=32)
        self.conv_emb = nn.Conv2d(in_channels=32,
                                  out_channels=32,
                                  kernel_size=(1, 1))

    def forward(self, x: torch.Tensor):
        """Expects a 5D input with a singleton last (depth) dim; returns segmentation and embedding latents."""
        assert x.shape[-1] == 1  # atm only 2D case allowed.
        feats = self.net(x[..., 0])  # drop the singleton depth dim for the 2D backbone
        seg = self.conv_seg(F.leaky_relu(self.norm_seg(feats))).unsqueeze(-1)
        emb = self.conv_emb(F.leaky_relu(self.norm_emb(feats))).unsqueeze(-1)
        return {'dense': {'embedded_latents': seg},
                'patched': {'embedded_latents': emb}}

    @staticmethod
    def add_model_specific_args(parent_parser):
        """No extra arguments for this baseline; return a pass-through child parser."""
        return ArgumentParser(parents=[parent_parser], add_help=False)
| Python |
3D | marcdcfischer/PUNet | src/modules/architectures/baseline_unetr.py | .py | 3,650 | 81 | import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Union, Dict
from argparse import Namespace, ArgumentParser
from monai.networks.nets import UNETR
import math
from monai.networks.layers.utils import get_act_layer, get_norm_layer
# Monai UNETR model
# see https://github.com/Project-MONAI/tutorials/blob/main/3d_segmentation/unetr_btcv_segmentation_3d.ipynb for new example
class MonaiUNETR(nn.Module):
    """2D MONAI UNETR backbone.

    Inputs smaller than the configured (teacher) patch size are padded to it,
    processed, and the outputs cropped back to the original extent.
    """

    def __init__(self,
                 conf: Union[Dict, Namespace]):
        super().__init__()
        self.conf = conf
        self.img_size = self.conf.patch_size_teacher
        self.norm_name = 'instance'  # atm hardcoded.
        self.net = UNETR(in_channels=self.conf.in_channels,
                         out_channels=32,
                         img_size=self.img_size[:-1],
                         feature_size=16,
                         pos_embed='conv',
                         norm_name=self.norm_name,
                         conv_block=True,
                         res_block=True,
                         spatial_dims=2)
        # Segmentation head (class logits).
        self.norm_seg = get_norm_layer(name='batch', spatial_dims=2, channels=32)
        self.conv_seg = nn.Conv2d(in_channels=32,
                                  out_channels=self.conf.out_channels,
                                  kernel_size=(1, 1))
        # Embedding head (32-channel features).
        self.norm_emb = get_norm_layer(name='batch', spatial_dims=2, channels=32)
        self.conv_emb = nn.Conv2d(in_channels=32,
                                  out_channels=32,
                                  kernel_size=(1, 1))

    def forward(self, x: torch.Tensor):
        """Pad to ``img_size`` if needed, run the 2D backbone + heads, crop back."""
        pads = (0, 0, 0, 0, 0, 0)
        if any(s_ < t_ for s_, t_ in zip(x.shape[2:], self.img_size)):
            in_size = x.shape[2:]
            pads = [math.floor((self.img_size[0] - in_size[0]) / 2), math.ceil((self.img_size[0] - in_size[0]) / 2),
                    math.floor((self.img_size[1] - in_size[1]) / 2), math.ceil((self.img_size[1] - in_size[1]) / 2),
                    math.floor((self.img_size[2] - in_size[2]) / 2), math.ceil((self.img_size[2] - in_size[2]) / 2)]
            if self.img_size[2] == 1:
                pads[-1] = 0  # don't pad the singleton depth dim
            # F.pad expects the padding tuple in reverse dimension order (last dim first).
            x = F.pad(x, tuple(reversed(pads)), mode='constant', value=min(x.min(), -1.))
        assert x.shape[-1] == 1  # atm only 2D case allowed.
        feats = self.net(x[..., 0])
        seg = self.conv_seg(F.leaky_relu(self.norm_seg(feats))).unsqueeze(-1)
        emb = self.conv_emb(F.leaky_relu(self.norm_emb(feats))).unsqueeze(-1)
        # Crop back to the pre-padding extent.
        if any(p_ > 0 for p_ in pads):
            crop = (...,
                    slice(pads[0], self.img_size[0] - pads[1]),
                    slice(pads[2], self.img_size[1] - pads[3]),
                    slice(pads[4], self.img_size[2] - pads[5]))
            seg = seg[crop]
            emb = emb[crop]
        return {'dense': {'embedded_latents': seg},
                'patched': {'embedded_latents': emb}}

    @staticmethod
    def add_model_specific_args(parent_parser):
        """No extra arguments for this baseline; return a pass-through child parser."""
        return ArgumentParser(parents=[parent_parser], add_help=False)
| Python |
3D | marcdcfischer/PUNet | src/modules/architectures/baseline_swin_unetr.py | .py | 3,482 | 78 | import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Union, Dict
from argparse import Namespace, ArgumentParser
from monai.networks.nets import SwinUNETR
import math
from monai.networks.layers.utils import get_act_layer, get_norm_layer
# Monai UNETR model
class MonaiSwinUNETR(nn.Module):
    """2D MONAI SwinUNETR backbone.

    Inputs smaller than the configured (teacher) patch size are padded to it,
    processed, and the outputs cropped back to the original extent.
    """

    def __init__(self,
                 conf: Union[Dict, Namespace]):
        super().__init__()
        self.conf = conf
        self.img_size = self.conf.patch_size_teacher
        self.norm_name = 'instance'  # atm hardcoded.
        self.net = SwinUNETR(img_size=self.img_size[:-1],
                             in_channels=self.conf.in_channels,
                             out_channels=32,
                             feature_size=24,
                             norm_name=self.norm_name,
                             use_checkpoint=True,
                             spatial_dims=2)
        # Segmentation head (class logits).
        self.norm_seg = get_norm_layer(name='batch', spatial_dims=2, channels=32)
        self.conv_seg = nn.Conv2d(in_channels=32,
                                  out_channels=self.conf.out_channels,
                                  kernel_size=(1, 1))
        # Embedding head (32-channel features).
        self.norm_emb = get_norm_layer(name='batch', spatial_dims=2, channels=32)
        self.conv_emb = nn.Conv2d(in_channels=32,
                                  out_channels=32,
                                  kernel_size=(1, 1))

    def forward(self, x: torch.Tensor):
        """Pad to ``img_size`` if needed, run the 2D backbone + heads, crop back."""
        pads = (0, 0, 0, 0, 0, 0)
        if any(s_ < t_ for s_, t_ in zip(x.shape[2:], self.img_size)):
            in_size = x.shape[2:]
            pads = [math.floor((self.img_size[0] - in_size[0]) / 2), math.ceil((self.img_size[0] - in_size[0]) / 2),
                    math.floor((self.img_size[1] - in_size[1]) / 2), math.ceil((self.img_size[1] - in_size[1]) / 2),
                    math.floor((self.img_size[2] - in_size[2]) / 2), math.ceil((self.img_size[2] - in_size[2]) / 2)]
            if self.img_size[2] == 1:
                pads[-1] = 0  # don't pad the singleton depth dim
            # F.pad expects the padding tuple in reverse dimension order (last dim first).
            x = F.pad(x, tuple(reversed(pads)), mode='constant', value=min(x.min(), -1.))
        assert x.shape[-1] == 1  # atm only 2D case allowed.
        feats = self.net(x[..., 0])
        seg = self.conv_seg(F.leaky_relu(self.norm_seg(feats))).unsqueeze(-1)
        emb = self.conv_emb(F.leaky_relu(self.norm_emb(feats))).unsqueeze(-1)
        # Crop back to the pre-padding extent.
        if any(p_ > 0 for p_ in pads):
            crop = (...,
                    slice(pads[0], self.img_size[0] - pads[1]),
                    slice(pads[2], self.img_size[1] - pads[3]),
                    slice(pads[4], self.img_size[2] - pads[5]))
            seg = seg[crop]
            emb = emb[crop]
        return {'dense': {'embedded_latents': seg},
                'patched': {'embedded_latents': emb}}

    @staticmethod
    def add_model_specific_args(parent_parser):
        """No extra arguments for this baseline; return a pass-through child parser."""
        return ArgumentParser(parents=[parent_parser], add_help=False)
| Python |
3D | marcdcfischer/PUNet | src/utils/initialization.py | .py | 10,484 | 189 | from typing import Type
import pytorch_lightning as pl
import pathlib as plb
from pytorch_lightning import Trainer
from pytorch_lightning.utilities.cloud_io import load as pl_load
import torch
from src.utils import callbacks, logging_custom
import warnings
def setup_training(hparams, cls_dm: Type[pl.LightningDataModule], cls_model: Type[pl.LightningModule], path_root):
    """Prepare datamodule, model and trainer for a training run.

    When ``--no_overwrite`` is set and a checkpoint is given, the checkpointed
    hyper-parameters are restored and selectively overwritten with the values
    of the current invocation before anything is constructed.
    """
    if hparams.no_overwrite:
        if hparams.ckpt is None:
            warnings.warn('No checkpoint is available, despite flag --no_overwrite being active.')
        else:
            hparams_ckpt = pl_load(hparams.ckpt, map_location=lambda storage, loc: storage)['hyper_parameters']
            # Run-control values always come from the current invocation.
            for key_ in ('no_overwrite', 'cold_start', 'gpus', 'accelerator', 'plugins', 'online_on',
                         'checkpoint_callback'):
                hparams_ckpt[key_] = getattr(hparams, key_)
            hparams_ckpt['terminate_on_nan'] = False  # Lightning has become picky ...
            hparams_ckpt['accumulate_grad_batches'] = 1  # Lightning has become picky ...
            hparams_ckpt['ckpt'] = hparams.ckpt
            hparams_ckpt['tags'] = hparams.tags if hasattr(hparams, 'tags') else hparams_ckpt['tags']
            # Training configuration taken from the current invocation.
            for key_ in ('mode', 'architecture', 'downstream', 'separate_background', 'num_annotated',
                         'loss_weight_segmentation', 'loss_weight_segmentation_gamma',
                         'loss_weight_segmentation_alpha_background',
                         'loss_weight_segmentation_alpha_background_downstream',
                         'loss_weight_segmentation_alpha_foreground', 'additive_alpha',
                         'loss_weight_sim_protos_downstream',
                         'learning_rate', 'learning_rate_downstream',
                         'learning_rate_instructions', 'learning_rate_instructions_downstream',
                         'weight_decay', 'weight_decay_downstream', 'with_scheduler_downstream',
                         'batch_size', 'max_steps', 'max_epochs', 'num_samples_epoch',
                         'flush_logs_every_n_steps'):
                hparams_ckpt[key_] = getattr(hparams, key_)
            # Keys that are not available for every checkpoint / configuration.
            for key_ in ('adaptation_variant', 'prompting_variant', 'selective_freezing',
                         'freeze_body', 'freeze_norm', 'freeze_bias_scores', 'freeze_inactive', 'fixed_output',
                         'tokens_per_instruction', 'mean_aggregation', 'top_k_selection',
                         'soft_selection_sigma', 'noninstructed_attention', 'no_bias_instructions',
                         'instructions_use_norm', 'instructions_elementwise_affine',
                         'label_indices_base', 'label_indices_downstream_active', 'label_indices_max_active'):
                if hasattr(hparams, key_):
                    hparams_ckpt[key_] = getattr(hparams, key_)
            # Paths / misc
            hparams_ckpt['tmp_dir'] = hparams.tmp_dir
            hparams_ckpt['export_dir'] = str(plb.Path(hparams.export_dir) / plb.Path(hparams.ckpt).parent.name) if hparams.export_dir else str(plb.Path(hparams.ckpt).parent / 'predictions')
            hparams_ckpt['dir_images'] = hparams.dir_images
            hparams_ckpt['dir_masks'] = hparams.dir_masks
            hparams_ckpt['default_root_dir'] = str(path_root / 'logs' / 'lightning')
            # Logging / plotting
            for key_ in ('gpus', 'accelerator', 'plugins', 'online_on',
                         'plot_interval_train', 'plot_interval_val', 'plot_interval_test'):
                hparams_ckpt[key_] = getattr(hparams, key_)
            # Remove unwanted keys (run_name is regenerated by the logger setup).
            del hparams_ckpt['run_name']
            for k_, v_ in hparams_ckpt.items():
                if k_ in hparams.__dict__.keys():
                    print(f'Overwriting {k_} with {v_} from passed args / ckpt instead of {hparams.__dict__[k_]}')
                else:
                    print(f'Setting {k_} to {v_} from passed args / ckpt.')
            hparams.__dict__.update(hparams_ckpt)

    # Logging + checkpointing
    loggers = logging_custom.setup_loggers(hparams, path_root=path_root)
    ckpt_callback = callbacks.setup_checkpointing(hparams)

    # Data
    dm = cls_dm(hparams)
    dm.prepare_data()
    dm.setup()  # Currently raises a deprecation warning.

    # Model: cold start loads weights only, otherwise training resumes from the checkpoint.
    if hparams.ckpt is not None and hparams.cold_start:
        model = cls_model.load_from_checkpoint(hparams.ckpt,
                                               **hparams.__dict__,
                                               strict=False if hparams.downstream else True)  # overwrite params
        resume_from_checkpoint = None
    else:
        model = cls_model(hparams)
        resume_from_checkpoint = hparams.ckpt

    # Validation cadence: downstream runs use their own interval.
    if hparams.downstream:
        check_val_every_n_epoch_ = hparams.check_val_every_n_epoch_downstream
    elif hasattr(hparams, 'check_val_every_n_epoch_') and hparams.check_val_every_n_epoch_ is not None:
        check_val_every_n_epoch_ = hparams.check_val_every_n_epoch_  # set via custom argparse parameter
    else:
        check_val_every_n_epoch_ = hparams.check_val_every_n_epoch

    trainer = Trainer.from_argparse_args(hparams,
                                         gpus=hparams.gpus,  # always set gpus via run config / cmd line arguments
                                         gradient_clip_val=1.,
                                         gradient_clip_algorithm='value',
                                         check_val_every_n_epoch=check_val_every_n_epoch_,
                                         resume_from_checkpoint=resume_from_checkpoint,
                                         callbacks=[ckpt_callback],
                                         logger=loggers,
                                         precision=16 if 'wip' in hparams.architecture else 32)
    # Avoid calling this during trainer creation due to potential logging buffer congestion.
    model.summarize(max_depth=-1)
    return dm, model, trainer
def setup_testing(hparams, cls_dm: Type[pl.LightningDataModule], cls_model: Type[pl.LightningModule], path_root):
    """Prepare datamodule, model and trainer for evaluation of a checkpoint.

    The checkpointed hyper-parameters are restored and selectively overwritten
    with the values of the current invocation; the model weights are loaded
    from ``hparams.ckpt``.
    """
    assert hparams.ckpt is not None
    hparams_ckpt = pl_load(hparams.ckpt, map_location=lambda storage, loc: storage)['hyper_parameters']
    # Run-control, paths and logging values come from the current invocation.
    for key_ in ('no_overwrite', 'cold_start', 'gpus', 'accelerator', 'plugins',
                 'checkpoint_callback', 'ckpt', 'mode', 'flush_logs_every_n_steps',
                 'tmp_dir', 'dir_images', 'dir_masks', 'online_on',
                 'plot_interval_train', 'plot_interval_val', 'plot_interval_test'):
        hparams_ckpt[key_] = getattr(hparams, key_)
    hparams_ckpt['terminate_on_nan'] = False  # Lightning has become picky ...
    hparams_ckpt['accumulate_grad_batches'] = 1  # Lightning has become picky ...
    # Remove outdated keys (may be absent in newer checkpoints).
    for key_ in ('freeze_bias', 'learning_rate_cnn'):
        hparams_ckpt.pop(key_, None)
    hparams_ckpt['export_dir'] = str(plb.Path(hparams.export_dir) / plb.Path(hparams.ckpt).parent.name) if hparams.export_dir else str(plb.Path(hparams.ckpt).parent / 'predictions')
    hparams_ckpt['default_root_dir'] = str(path_root / 'logs' / 'lightning')
    hparams.__dict__.update(hparams_ckpt)

    # Logging
    loggers = logging_custom.setup_loggers(hparams, path_root=path_root)

    # Data
    dm = cls_dm(hparams)
    dm.prepare_data()
    dm.setup()  # Currently raises a deprecation warning.

    # Model + trainer
    model = cls_model.load_from_checkpoint(hparams.ckpt,
                                           **hparams.__dict__)  # overwrite parameters
    trainer = Trainer.from_argparse_args(hparams,
                                         gradient_clip_val=1.,
                                         gradient_clip_algorithm='value',
                                         gpus=hparams.gpus,
                                         logger=loggers,
                                         resume_from_checkpoint=None)
    return dm, model, trainer
| Python |
3D | marcdcfischer/PUNet | src/utils/__init__.py | .py | 0 | 0 | null | Python |
3D | marcdcfischer/PUNet | src/utils/logging_custom.py | .py | 1,175 | 30 | from datetime import datetime
import pytorch_lightning as pl
import pathlib as plb
def setup_loggers(hparams, path_root):
    """Build the experiment loggers (TensorBoard plus Weights & Biases).

    Side effects: stores a timestamped ``run_name`` in ``hparams.__dict__``,
    creates the log directory and points ``hparams.default_root_dir`` at the
    lightning subfolder.

    :param hparams: argparse namespace with online_* logging options.
    :param path_root: project root; its stem becomes part of the run name.
    :return: list of instantiated lightning loggers.
    :raises NotImplementedError: for any online logger other than 'wandb'.
    """
    stamp = datetime.now().strftime('%m%d%H%M%S')
    run_name = f'{plb.Path(path_root).stem}_{stamp}'
    hparams.__dict__['run_name'] = run_name

    log_dir = plb.Path(path_root) / 'logs'
    log_dir.mkdir(exist_ok=True, parents=True)
    hparams.default_root_dir = str(log_dir / 'lightning')

    # TensorBoard is always attached; the online logger is added on top.
    loggers = [pl.loggers.TensorBoardLogger(name=run_name,
                                            save_dir=str(log_dir / 'tensorboard'))]
    if hparams.online_logger.lower() != 'wandb':
        raise NotImplementedError()
    loggers.append(pl.loggers.wandb.WandbLogger(name=run_name,
                                                save_dir=log_dir,
                                                project=hparams.online_project,
                                                tags=hparams.tags,
                                                entity=hparams.online_entity,
                                                offline=not hparams.online_on))
    return loggers
| Python |
3D | marcdcfischer/PUNet | src/utils/callbacks.py | .py | 1,727 | 43 | from argparse import ArgumentParser
import pathlib as plb
import pytorch_lightning as pl
import os
def setup_checkpointing(hparams, save_top_k: int = 3):
    """Configure lightning's ModelCheckpoint callback.

    Chooses an S3 bucket directory when a bucket is configured and online
    logging is active, otherwise a local folder below the default root dir.
    Monitors validation dice when segmentation is trained, else train loss.

    Note: requires hparams attributes that might only be set in setup_loggers().

    :param hparams: argparse namespace (s3_bucket, run_name, ckpt_save_last, ...).
    :param save_top_k: how many best checkpoints to retain.
    :return: configured ``pl.callbacks.ModelCheckpoint``.
    """
    # Checkpoint target: remote bucket vs. local folder.
    if hparams.s3_bucket.strip(' ') and hparams.online_on:
        dir_ckpt = os.path.join(hparams.s3_bucket, 'checkpoints', hparams.run_name)
    else:
        dir_ckpt = str(plb.Path(hparams.default_root_dir) / 'checkpoints' / hparams.run_name)
    hparams.__dict__['ckpt_dir'] = dir_ckpt

    save_last = bool(hparams.ckpt_save_last)
    # Pick the monitored metric; the braces below are lightning filename
    # placeholders, not f-string interpolations.
    if hparams.loss_weight_segmentation > 0.:
        monitor_, mode_ = 'val_dice_mean', 'max'
        filename_ = 'ckpt_{epoch:03d}_{val_dice_mean:.4f}'
    else:
        monitor_, mode_ = 'train_loss_mean', 'min'
        filename_ = 'ckpt_{epoch:03d}_{train_loss_mean:.4f}'
    return pl.callbacks.ModelCheckpoint(monitor=monitor_,
                                        mode=mode_,
                                        save_last=save_last,
                                        save_top_k=save_top_k,
                                        dirpath=dir_ckpt,
                                        filename=filename_,
                                        verbose=True)
def add_callback_specific_args(parent_parser):
    """Return a child parser that adds checkpoint-callback CLI options."""
    child = ArgumentParser(parents=[parent_parser], add_help=False)
    child.add_argument('--ckpt_save_last', action='store_true')
    return child
| Python |
3D | marcdcfischer/PUNet | src/utils/aws.py | .py | 6,010 | 164 | import os
from typing import Optional
import pathlib as plb
import boto3
import sys
import threading
import logging
from botocore.exceptions import ClientError
from argparse import ArgumentParser
logger = logging.getLogger(__name__)
MB = 1024 * 1024
# see https://docs.aws.amazon.com/code-samples/latest/catalog/code-catalog-python-example_code-s3.html
class TransferCallback:
    """Progress reporter passed to boto3 transfer calls.

    The transfer manager invokes ``__call__`` from its worker threads with the
    number of bytes moved since the previous invocation. Totals and per-thread
    counts are accumulated under a lock, and a single progress line is
    rewritten on stdout.
    """

    def __init__(self, target_size):
        # target_size is the expected transfer size in MB (matches the
        # progress message emitted below).
        self._target_size = target_size
        self._total_transferred = 0
        self._lock = threading.Lock()
        self.thread_info = {}  # bytes transferred keyed by worker thread ident

    def __call__(self, bytes_transferred):
        """Accumulate transferred bytes (thread-safe) and print progress.

        Called concurrently by multiple transfer threads, hence the lock
        around all shared-state updates.
        """
        worker = threading.current_thread()
        with self._lock:
            self._total_transferred += bytes_transferred
            self.thread_info[worker.ident] = self.thread_info.get(worker.ident, 0) + bytes_transferred

            target = self._target_size
            sys.stdout.write(
                f"\r{self._total_transferred / MB} MB of {target} MB transferred "
                f"({(self._total_transferred / MB / target) * 100:.2f}%).")
            sys.stdout.flush()
def download_with_default_configuration(s3_object, download_file_path):
    """Download one S3 object to a local path with the default transfer config.

    Progress is reported through a :class:`TransferCallback`.

    :param s3_object: boto3 S3 Object resource to download.
    :param download_file_path: local destination path.
    :return: per-thread byte counts collected by the progress callback.
    """
    size_mb = s3_object.content_length / MB
    print(f'Downloading ckpt with size {size_mb} MB')
    progress = TransferCallback(size_mb)
    s3_object.download_file(download_file_path, Callback=progress)
    return progress.thread_info
def list_my_buckets(s3):
    """Print the names of all buckets visible to the given S3 resource."""
    names = [b.name for b in s3.buckets.all()]
    print('Buckets:\n\t', *names, sep="\n\t")
def list_objects(bucket, prefix=None):
    """
    Lists the objects in a bucket, optionally filtered by a prefix.

    :param bucket: The bucket to query.
    :param prefix: When specified, only objects that start with this prefix are listed.
    :return: The list of objects.
    :raises ClientError: re-raised after logging when the listing fails.
    """
    try:
        source = bucket.objects.filter(Prefix=prefix) if prefix else bucket.objects.all()
        objects = list(source)
        logger.info("Got objects %s from bucket '%s'",
                    [o.key for o in objects], bucket.name)
    except ClientError:
        logger.exception("Couldn't get objects for bucket '%s'.", bucket.name)
        raise
    else:
        return objects
def fetch_ckpt(hparams, object_key: str = '', bucket_name: Optional[str] = 'my_bucket', download_file_path: str = '', digits=6):
    """Select and download a checkpoint from S3, returning its local path.

    If ``object_key`` is empty, the checkpoint is chosen from
    ``checkpoints/<hparams.ckpt_run_name>``: the best-scoring one by default,
    or the 'last' one when ``hparams.ckpt_select_last`` is set.

    :param hparams: namespace providing ckpt_run_name, ckpt_local_folder and ckpt_select_last.
    :param object_key: explicit S3 key of a checkpoint; overrides the selection logic.
    :param bucket_name: bucket to query.
    :param download_file_path: local target path; derived from the key when empty.
    :param digits: width of the metric value encoded at the end of the filename
                   (used to slice the score out of '..._<score>.ckpt').
    :return: local file path of the (possibly already present) checkpoint.
    :raises ValueError: if the checkpoint filenames encode neither 'loss' nor 'dice'.
    """
    prefix = f'checkpoints/{hparams.ckpt_run_name}'
    s3 = boto3.resource('s3')
    bucket = s3.Bucket(bucket_name)
    list_my_buckets(s3)
    objects = list_objects(bucket, prefix=prefix)
    objects.reverse()  # higher priority to later ckpts
    print(f'Available keys: {[o_.key for o_ in objects]}')
    if not object_key:
        if not hparams.ckpt_select_last:
            # max score
            objects = [o_ for o_ in objects if 'last' not in o_.key]
            score_ = [o_.key[-5 - digits:-5] for o_ in objects]  # fixed-width score slice, e.g. '0.9123' of '..._0.9123.ckpt'
            # Lexicographic min/max is valid here because scores share a fixed format.
            if 'loss' in objects[0].key:
                score_of_interest_ = min(score_)
            elif 'dice' in objects[0].key:
                score_of_interest_ = max(score_)
            else:
                # Bug fix: the exception was previously constructed but never raised,
                # which let execution continue and crash with an undefined selection.
                raise ValueError(f'Cannot infer metric (loss/dice) from checkpoint key {objects[0].key}')
            object_selected = s3.Bucket(bucket_name).Object(objects[score_.index(score_of_interest_)].key)  # select best ckpt
        else:
            # max epoch
            selection = [o_ for o_ in objects if 'last' in o_.key][0]
            object_selected = s3.Bucket(bucket_name).Object(selection.key)  # select last ckpt
    else:
        object_selected = s3.Bucket(bucket_name).Object(object_key)
    print(f'Selecting ckpt {object_selected.key}')

    if not download_file_path:
        download_file_path = plb.Path(hparams.ckpt_local_folder) if hparams.ckpt_local_folder else plb.Path(__file__).parent.parent.parent.parent / 'logs' / 'aws'
        download_file_path = str(download_file_path / plb.Path(object_selected.key).parts[-2] / plb.Path(object_selected.key).name)
    os.makedirs(plb.Path(download_file_path).parent, exist_ok=True)
    print(f'Saving ckpt to {download_file_path}')
    # Skip the download if the checkpoint is already cached locally.
    if not plb.Path(download_file_path).exists():
        download_with_default_configuration(s3_object=object_selected,
                                            download_file_path=download_file_path)
    return download_file_path
def add_aws_specific_args(parent_parser):
    """
    Specify the hyperparams for this LightningModule
    """
    child = ArgumentParser(parents=[parent_parser], add_help=False)
    child.add_argument('--ckpt_local_folder', type=str, nargs='?', default='', const='')
    child.add_argument('--ckpt_select_last', action='store_true')
    return child
if __name__ == '__main__':
    # Ad-hoc manual smoke test: fetch the 'last' checkpoint of a hardcoded run.
    # Requires valid AWS credentials and network access.
    from collections import namedtuple
    Hparams = namedtuple('hparams', ['ckpt_run_name', 'ckpt_local_folder', 'ckpt_select_last'])
    # ckpt_select_last=True -> selection by max epoch ('last' ckpt), not best score.
    hparams = Hparams('medical_wip_0716082939', None, True)
    fetch_ckpt(hparams)
| Python |
3D | marcdcfischer/PUNet | src/utils/plotting/similarities_student_teacher.py | .py | 7,531 | 132 | import matplotlib
matplotlib.use('Agg')
import os
import numpy as np
import matplotlib.pyplot as plt
from torchvision.utils import make_grid
from typing import Optional, Dict
from scipy import ndimage
import torch
import pathlib as plb
def visualize_similarities_student_teacher(plots: dict,
                                           x_student: torch.Tensor,
                                           x_teacher: torch.Tensor,
                                           y_teacher: torch.Tensor,
                                           n_elements: int = 6,
                                           dpi: int = 200,
                                           prefix: str = 'val',
                                           path_plots: Optional[str] = None):
    """Render softmaxed student/teacher similarity maps to PNGs.

    Iterates over the ``plots`` dict and plots every entry whose key contains
    'sim_student_teacher'.

    :return: list of written PNG file paths.
    """
    # Move inputs off the graph / device once, up front.
    x_student = x_student.detach().cpu()
    x_teacher = x_teacher.detach().cpu()

    png_paths = list()
    for key_, value_ in plots.items():
        if 'sim_student_teacher' not in key_:
            continue
        # Cap the number of plotted elements by batch size and token count.
        n_plot = min(n_elements, value_.shape[0], value_.shape[4])
        png_paths.append(plot_sim_student_teacher(sim_data=np.array(value_.detach().cpu().float()),
                                                  x_student=np.array(x_student),
                                                  x_teacher=np.array(x_teacher),
                                                  y_teacher=np.array(y_teacher.cpu()),
                                                  n_elements=n_plot,
                                                  path_plots=path_plots,
                                                  prefix=prefix,
                                                  dpi=dpi,
                                                  tag=key_))
    return png_paths
def plot_sim_student_teacher(sim_data: np.ndarray,
                             x_student: np.ndarray,
                             x_teacher: np.ndarray,
                             y_teacher: np.ndarray,
                             n_elements: int = 6,
                             axes_size: float = 2.,
                             path_plots: Optional[str] = None,
                             prefix: str = 'val',
                             dpi: int = 200,
                             tag: str = ''):
    """Plot student/teacher images with overlaid similarity maps and labels.

    A mid-volume similarity map is upsampled to teacher resolution, a central
    cross section is taken, and a two-row grid (student row / teacher row) is
    written to '<path_plots>/plots/<prefix>_<tag>.png'.

    :param sim_data: similarity volume; assumed layout [N, h, w, d, H', W', D'] — TODO confirm against caller.
    :param x_student: student images [N, C, H_s, W_s, D_s]; only channel 0 is shown.
    :param x_teacher: teacher images [N, C, H, W, D]; assumed at least as large as x_student per axis.
    :param y_teacher: teacher labels [N, H, W, D].
    :param n_elements: number of batch elements to plot.
    :param axes_size: size (inches) per grid cell.
    :param path_plots: output root; falls back to a 'logs/lightning/plots' folder next to the package.
    :param prefix: filename prefix (e.g. 'val').
    :param dpi: resolution of the saved figure.
    :param tag: identifier appended to the filename.
    :return: path of the written PNG.
    """
    path_plots = os.path.join(path_plots, 'plots') if path_plots is not None else str(plb.Path(__file__).resolve().parent.parent.parent / 'logs' / 'lightning' / 'plots')
    os.makedirs(path_plots, exist_ok=True)

    # normalize data (images per element, similarities over the whole tensor)
    x_student = np.stack([(x_student[idx_] - np.amin(x_student[idx_])) / (np.amax(x_student[idx_]) - np.amin(x_student[idx_]) + 1e-12) for idx_ in range(x_student.shape[0])], axis=0)  # per img
    x_teacher = np.stack([(x_teacher[idx_] - np.amin(x_teacher[idx_])) / (np.amax(x_teacher[idx_]) - np.amin(x_teacher[idx_]) + 1e-12) for idx_ in range(x_teacher.shape[0])], axis=0)  # per img
    sim_data = (sim_data - np.amin(sim_data)) / (np.amax(sim_data) - np.amin(sim_data) + 1e-12)  # overall

    # Select elements
    x_student = x_student[:n_elements, 0, ...]  # [N, H, W, D]. Only first channel
    x_teacher = x_teacher[:n_elements, 0, ...]
    y_teacher = y_teacher[:n_elements, ...]

    # Pad the (smaller) student crop symmetrically to teacher size so both rows align.
    x_shape_diff = [x_t - x_s for x_t, x_s in zip(x_teacher.shape[1:], x_student.shape[1:])]
    x_student_padded = np.pad(x_student,
                              ((0, 0),
                               (x_shape_diff[0] // 2, x_shape_diff[0] // 2 + x_shape_diff[0] % 2),
                               (x_shape_diff[1] // 2, x_shape_diff[1] // 2 + x_shape_diff[1] % 2),
                               (x_shape_diff[2] // 2, x_shape_diff[2] // 2 + x_shape_diff[2] % 2)))

    # Take the similarity map anchored at the central query position.
    sim_data_shape = sim_data.shape
    sim_data_selected = sim_data[:n_elements,
                                 sim_data_shape[1] // 2,
                                 sim_data_shape[2] // 2,
                                 sim_data_shape[3] // 2,
                                 :, :, :].reshape((n_elements, sim_data_shape[-3], sim_data_shape[-2], sim_data_shape[-1]))  # [N, H, W, D]

    # Nearest-neighbor upsample the similarity map to teacher resolution.
    zoom_x, zoom_y, zoom_z = x_teacher.shape[1] / sim_data_selected.shape[1], x_teacher.shape[2] / sim_data_selected.shape[2], x_teacher.shape[3] / sim_data_selected.shape[3]
    sim_data_zoomed = np.stack([ndimage.zoom(sim_data_selected[idx_], zoom=(zoom_x, zoom_y, zoom_z), order=0) for idx_ in range(sim_data_selected.shape[0])], axis=0)

    # Simple cross section view (central depth slice)
    x_student_padded = x_student_padded[..., x_student_padded.shape[-1] // 2]
    x_teacher = x_teacher[..., x_teacher.shape[-1] // 2]
    y_teacher = y_teacher[..., y_teacher.shape[-1] // 2]
    sim_data_zoomed = sim_data_zoomed[..., sim_data_zoomed.shape[-1] // 2]

    # Image grid: top row student, bottom row teacher (rotated for display).
    x_images = torch.Tensor(np.concatenate([np.rot90(x_student_padded, 1, axes=(-2, -1)), np.rot90(x_teacher, axes=(-2, -1))], axis=0)).unsqueeze(1)
    x_sim_1 = torch.Tensor(np.concatenate([np.zeros_like(np.rot90(sim_data_zoomed, axes=(-2, -1))), np.rot90(sim_data_zoomed, axes=(-2, -1))], axis=0)).unsqueeze(1)
    x_sim_2 = 1 - torch.Tensor(np.concatenate([np.ones_like(np.rot90(sim_data_zoomed, axes=(-2, -1))), np.rot90(sim_data_zoomed, axes=(-2, -1))], axis=0)).unsqueeze(1)
    y_overlay = torch.Tensor(np.concatenate([np.zeros_like(np.rot90(x_student_padded, 1, axes=(-2, -1))), np.rot90(y_teacher, axes=(-2, -1))], axis=0)).unsqueeze(1)
    grid_images = make_grid(x_images, nrow=n_elements, normalize=False).numpy().transpose(1, 2, 0)
    grid_sim_1 = make_grid(x_sim_1, nrow=n_elements, normalize=False).numpy().transpose(1, 2, 0)
    grid_sim_2 = make_grid(x_sim_2, nrow=n_elements, normalize=False).numpy().transpose(1, 2, 0)
    grid_overlay = make_grid(y_overlay, nrow=n_elements, normalize=False).numpy().transpose(1, 2, 0)

    # Grid coordinates of each tile center (currently only used by the
    # commented-out scatter marker below).
    pos_y = x_teacher.shape[1] // 2
    pos_x = x_teacher.shape[2] // 2
    grid_pos_x = [pos_x + 2 + idx_x % n_elements * (2 + 2 * pos_x) for _ in range(2) for idx_x in range(n_elements)]
    grid_pos_y = [pos_y + 2 + idx_y // 1 * (2 + 2 * pos_y) for idx_y in range(2) for _ in range(n_elements)]

    # Colormaps with transparent under-range so overlays don't hide the image.
    n_rows, n_cols = 2, n_elements
    cmap_ = plt.cm.get_cmap('jet')
    cmap_.set_under(color='k', alpha=0)
    cmap_2 = plt.cm.get_cmap('cool')
    cmap_2.set_under(color='k', alpha=0)
    cmap_3 = plt.cm.get_cmap('Blues')
    cmap_3.set_under(color='k', alpha=0)
    fig, ax = plt.subplots(figsize=(axes_size * n_cols, axes_size * n_rows))
    ax.axis('off')
    ax.imshow(grid_images[..., 0], cmap='gray')
    # High-similarity overlay (jet), alpha scaled by how far above midrange.
    content_ = grid_sim_1[..., 0]
    vmax_ = content_.max()
    vmin_ = (content_.max() - content_.min()) / 2
    im = ax.imshow(content_, cmap=cmap_, alpha=0.4 * (np.maximum(content_, vmin_) - vmin_) / (vmax_ - vmin_), vmin=vmin_, vmax=vmax_, interpolation='nearest')
    # Inverted-similarity overlay (cool).
    content_ = grid_sim_2[..., 0]
    vmax_ = content_.max()
    vmin_ = (content_.max() - content_.min()) / 2
    im = ax.imshow(content_, cmap=cmap_2, alpha=0.6 * (np.maximum(content_, vmin_) - vmin_) / (vmax_ - vmin_), vmin=0.1, vmax=vmax_, interpolation='nearest')
    # Label overlay (Blues) on the teacher row.
    content_ = grid_overlay[..., 0]
    vmax_ = max(content_.max(), 1)
    vmin_ = content_.min() + 0.01
    im = ax.imshow(content_, cmap=cmap_3, alpha=0.75, vmin=vmin_, vmax=vmax_, interpolation='nearest')
    # fig.colorbar(im)
    # ax.scatter(grid_pos_x, grid_pos_y, s=2, c='magenta')
    png_path = f'{path_plots}/{prefix}_{tag}.png'
    fig.tight_layout()
    plt.savefig(png_path, bbox_inches='tight', dpi=dpi)
    plt.close(fig)
    return png_path
| Python |
3D | marcdcfischer/PUNet | src/utils/plotting/image_grid.py | .py | 3,538 | 65 | import matplotlib
matplotlib.use('Agg')
import torch
import matplotlib.pyplot as plt
from torchvision.utils import make_grid
import itertools
import os
import math
from typing import Optional, List
import torch.nn.functional as F
def plot_grid_middle(x,
                     targets: Optional[torch.Tensor] = None,
                     preds: Optional[torch.Tensor] = None,
                     scribbles: Optional[torch.Tensor] = None,
                     indices_elements: List[int] = (0, 4, 8),
                     prefix='val', dpi=200, axes_size=8, path_plots=None):
    """Plot central slices of inputs, labels, predictions and scribbles.

    For each selected batch element a column grid is saved as
    '<path_plots>/plots/<prefix>_<idx>_grid.png' containing the image,
    scribbles, argmax target/prediction and per-channel target/pred pairs.

    Note: despite the Optional annotations, ``targets`` and ``preds`` are
    required (they are indexed unconditionally below) — only ``scribbles``
    may be None.

    :param x: input images [N, C, H, W, D].
    :param targets: one-hot targets [N, C_out, H_t, W_t, D].
    :param preds: predictions [N, C_out, H_p, W_p, D].
    :param scribbles: optional scribble labels [N, H, W, D].
    :param indices_elements: batch indices to plot (clamped to batch size).
    :return: list of written PNG file paths.
    """
    path_plots = os.path.join(path_plots, 'plots') if path_plots is not None else os.path.join(os.path.abspath(os.path.dirname(__file__)), '..', '..', 'logs', 'lightning', 'plots')
    os.makedirs(path_plots, exist_ok=True)
    with torch.no_grad():
        # Central depth slice of every tensor.
        x = x[..., x.shape[4]//2]
        targets = targets[..., targets.shape[4]//2]
        preds = preds[..., preds.shape[4]//2]
        scribbles = scribbles[..., scribbles.shape[3]//2] if scribbles is not None else None
        channels = targets.shape[1]

        # zoom elements
        # Bug fix: zoom_input_y previously divided x.shape[3] by itself (always 1.0),
        # and the resize condition tested zoom_target_y instead of zoom_input_y,
        # so the input was never resized along y.
        zoom_input_x, zoom_input_y = preds.shape[2] / x.shape[2], preds.shape[3] / x.shape[3]
        zoom_target_x, zoom_target_y = preds.shape[2] / targets.shape[2], preds.shape[3] / targets.shape[3]
        if zoom_input_x < 1. or zoom_input_y < 1.:
            x_zoomed = F.interpolate(x, size=preds.shape[2:])
        else:
            x_zoomed = x
        if zoom_target_x < 1. or zoom_target_y < 1.:
            # Resample labels via argmax + nearest neighbor to keep them discrete.
            targets_zoomed = F.one_hot(F.interpolate(targets.argmax(dim=1, keepdim=True), size=preds.shape[2:], mode='nearest').squeeze(1), num_classes=channels).permute(0, 3, 1, 2)
            scribbles_zoomed = F.interpolate(scribbles.unsqueeze(dim=1), size=preds.shape[2:], mode='nearest').squeeze(dim=1) if scribbles is not None else None
        else:
            targets_zoomed = targets
            scribbles_zoomed = scribbles

        png_paths = list()
        for idx_img in [min(x.shape[0] - 1, n_) for n_ in indices_elements]:
            # Interleave per-channel target/prediction slices.
            targets_slices = [targets_zoomed[idx_img:idx_img + 1, idx_:idx_ + 1, ...].float() for idx_ in range(channels)]
            preds_slices = [preds[idx_img:idx_img + 1, idx_:idx_ + 1, ...].float() for idx_ in range(channels)]
            paired_slices = list(itertools.chain(*list(zip(targets_slices, preds_slices))))
            grid = make_grid(torch.cat(
                [((x_zoomed[idx_img:idx_img + 1, ...]) - torch.min(x_zoomed[idx_img:idx_img + 1, ...])) / (torch.max(x_zoomed[idx_img:idx_img + 1, ...] - torch.min(x_zoomed[idx_img:idx_img + 1, ...]))),
                 scribbles_zoomed[idx_img:idx_img + 1, ...].unsqueeze(dim=1).float() / channels if scribbles is not None else torch.zeros_like(x_zoomed[idx_img:idx_img + 1, ...]),
                 torch.argmax(targets_zoomed[idx_img:idx_img + 1, ...], dim=1, keepdim=True).float() / channels,
                 torch.argmax(preds[idx_img:idx_img + 1, ...], dim=1, keepdim=True).float() / channels,
                 *paired_slices
                 ], dim=1).permute(1, 0, 2, 3), padding=5).numpy().transpose(1, 2, 0)

            fig, ax = plt.subplots(figsize=(axes_size * int(math.ceil(channels / 8.)), axes_size * 8))
            ax.axis('off')
            fig.tight_layout()
            ax.imshow(grid)
            png_paths.append(f'{path_plots}/{prefix}_{str(idx_img).zfill(2)}_grid.png')
            plt.savefig(png_paths[-1], bbox_inches='tight', dpi=dpi)
            plt.close(fig)
    return png_paths
| Python |
3D | marcdcfischer/PUNet | src/utils/plotting/__init__.py | .py | 0 | 0 | null | Python |
3D | marcdcfischer/PUNet | src/data/conversion_monai.py | .py | 2,202 | 50 | import torch
import torchio as tio
import pathlib as plb
from typing import Union, Optional, Tuple
import pandas as pd
import nibabel as nib
import numpy as np
def convert_subjects(df_data: pd.DataFrame,
                     with_coords: bool = True,
                     visualize: bool = False):
    """Convert a dataframe of file paths and metadata into subject dicts.

    :param df_data: dataframe with columns 'images', 'names', 'frames',
                    'domains', 'annotated' and optionally 'masks', 'pseudos',
                    'scribbles'.
    :param with_coords: when True, attach a centered coordinate grid computed
                        from the image on disk (loaded via nibabel).
    :param visualize: unused; kept for interface compatibility.
    :return: (subject dicts, content keys present, auxiliary keys).
    """
    subject_dicts = list()
    content_keys = ['image', 'coord_grid']
    aux_keys = ['name', 'domain', 'frame', 'annotated', 'age']
    # dataframe column -> subject dict key for optional annotations
    optional_columns = (('masks', 'label'), ('pseudos', 'pseudo'), ('scribbles', 'scribbles'))
    for _, row_ in df_data.iterrows():
        subject = {
            'image': row_['images'],
            'coord_grid': _gen_mesh_grid(nib.load(row_['images']).shape) if with_coords else 0,
            'name': row_['names'],
            'frame': row_['frames'],  # NOT 'names' as they might not be unique!
            'domain': row_['domains'],
            'annotated': row_['annotated'],
            'age': 0,  # Or whatever you might want to add
        }
        # Add available annotations
        for column_, key_ in optional_columns:
            if column_ in row_.keys() and row_[column_] is not None:
                subject[key_] = row_[column_]
                if key_ not in content_keys:
                    content_keys.append(key_)
        subject_dicts.append(subject)
    return subject_dicts, content_keys, aux_keys
def _gen_mesh_grid(image_size: Tuple[int, int, int]):
coord_grid = torch.stack(torch.meshgrid(torch.arange(image_size[0]), torch.arange(image_size[1]), torch.arange(image_size[2])), dim=0).float() # [3, H, W, D]
coord_grid -= torch.tensor(((image_size[0] - 1) / 2., (image_size[1] - 1) / 2., (image_size[2] - 1) / 2.)).reshape(3, 1, 1, 1) # Shift coords so center of volume is at [0, 0, 0]. That way misalignments are most severe at the boundaries.
return np.array(coord_grid)
| Python |
3D | marcdcfischer/PUNet | src/data/collate.py | .py | 256 | 10 | from torch.utils.data._utils.collate import default_collate
def collate_list(batch):
    """Flattens list and passes it to standard collate function"""
    flat = []
    for elements_ in batch:
        flat.extend(elements_)
    return default_collate(flat)
| Python |
3D | marcdcfischer/PUNet | src/data/__init__.py | .py | 0 | 0 | null | Python |
3D | marcdcfischer/PUNet | src/data/loader_monai.py | .py | 13,297 | 208 | from torch.utils.data import WeightedRandomSampler, RandomSampler # Would this (random_split) be a good option for ddp?
from argparse import ArgumentParser, Namespace
from src.data.distributed_wrapper import DistributedSamplerWrapper
import torch
import pytorch_lightning as pl
from typing import Union, Dict, Optional
import numpy as np
from src.data.transforms_monai import generate_transforms, generate_test_transforms
from src.data.conversion_monai import convert_subjects
# from src.data.datasets.gather_tiny_ixi import gather_data
from src.data.datasets import gather_tcia_btcv, gather_ctorg
from monai.data import DataLoader, Dataset # Wrapper around torch DataLoader and Dataset
class BasicDataModule(pl.LightningDataModule):
    """Lightning data module wrapping the MONAI datasets / loaders.

    Gathers the configured dataset (tcia_btcv or ctorg), converts dataframes
    into subject dicts, builds train/val/test transforms and exposes the
    corresponding dataloaders (optionally with extra axis-permuted training
    loaders when orientation_swap is enabled).
    """
    def __init__(self, conf: Union[Dict, Namespace]):
        super().__init__()
        self.conf = conf  # Model and DataModule hparams are now the same. So only one save_hyperparameters is allowed
        # Transforms and dataframes are populated lazily in setup().
        self.transform_train, self.transform_val, self.transform_test, self.transform_test_post = None, None, None, None
        self.df_train, self.df_val, self.df_test = None, None, None,
        self.ds_train, self.ds_val, self.ds_test = None, None, None

    def prepare_data(self):
        """No-op: data download / preprocessing is done offline."""
        # Nothing to do here - best to do it offline
        pass

    def _get_max_shape_train(self, ds_subjects: Dataset):
        """Max image shape over all crops of all training subjects (samples are lists of crops)."""
        shapes = np.array([crop_['image'].shape for sub_ in ds_subjects for crop_ in sub_])
        return shapes.max(axis=0)

    def _get_max_shape_val(self, ds_subjects: Dataset):
        """Max image shape over all validation subjects (one sample per subject)."""
        shapes = np.array([sub_['image'].shape for sub_ in ds_subjects])
        return shapes.max(axis=0)

    def _get_foreground_background_ratio(self, ds_subjects: Dataset):
        """Compute per-class inverse foreground/background ratios.

        Returns an array of length out_channels whose first entry (background)
        is 0 and whose remaining entries are additive_alpha_factor times the
        inverse of the mean foreground/background voxel ratio per class.
        """
        ratios = np.zeros((self.conf.out_channels - 1,))
        for sub_ in ds_subjects:
            background_ = (sub_['label'] == 0.).float().count_nonzero().item()
            for idx_ in range(1, self.conf.out_channels):
                ratios[idx_ - 1] += (sub_['label'] == idx_).float().count_nonzero().item() / background_
        ratios /= len(ds_subjects)
        ratios_inv = 1 / ratios
        ratios_bound = np.concatenate([[0], self.conf.additive_alpha_factor * ratios_inv], axis=0)
        return ratios_bound

    def setup(self, stage: Optional[str] = None):
        """Gather dataframes, build transforms and instantiate the datasets.

        Only the 'fit' stage is supported; it also prepares the test dataset.

        :raises NotImplementedError: for stage == 'test'.
        :raises ValueError: for any other unknown stage.
        """
        # Assign train/val datasets for use in dataloaders
        if stage == 'fit' or stage is None:
            # Data gathering / preparation
            if self.conf.dataset.lower() == 'tcia_btcv':
                self.df_train, self.df_val, self.df_test = gather_tcia_btcv.generate_dataframes(self.conf)
            elif self.conf.dataset.lower() == 'ctorg':
                self.df_train, self.df_val, self.df_test = gather_ctorg.generate_dataframes(self.conf)
            else:
                raise NotImplementedError()
            dict_subjects_train, content_keys, aux_keys = convert_subjects(self.df_train)
            dict_subjects_val, _, _ = convert_subjects(self.df_val)
            dict_subjects_test, _, _ = convert_subjects(self.df_test)

            self.transform_train, self.transform_val = generate_transforms(patch_size_students=self.conf.patch_size_students,
                                                                           patch_size_teacher=self.conf.patch_size_teacher,
                                                                           content_keys=content_keys,
                                                                           aux_keys=aux_keys,
                                                                           num_samples=self.conf.num_samples,
                                                                           n_transforms=self.conf.num_transforms,
                                                                           orientation='xy')  # Cropping or padding screws with the coord grid; so prepare data beforehand properly.
            self.transform_test = generate_test_transforms(content_keys=content_keys,
                                                           aux_keys=aux_keys)
            # Additional axis orientations, only used when orientation_swap is on.
            transform_train_zy, _ = generate_transforms(patch_size_students=self.conf.patch_size_students,
                                                        patch_size_teacher=self.conf.patch_size_teacher,
                                                        content_keys=content_keys,
                                                        aux_keys=aux_keys,
                                                        num_samples=self.conf.num_samples,
                                                        n_transforms=self.conf.num_transforms,
                                                        orientation='zy')
            transform_train_xz, _ = generate_transforms(patch_size_students=self.conf.patch_size_students,
                                                        patch_size_teacher=self.conf.patch_size_teacher,
                                                        content_keys=content_keys,
                                                        aux_keys=aux_keys,
                                                        num_samples=self.conf.num_samples,
                                                        n_transforms=self.conf.num_transforms,
                                                        orientation='xz')

            # Datasets
            self.ds_train = Dataset(data=dict_subjects_train, transform=self.transform_train)  # (monai's) CacheDataset may bring some speed up (for deterministic transforms)
            if self.conf.orientation_swap:
                self.ds_train_zy = Dataset(data=dict_subjects_train, transform=transform_train_zy)
                self.ds_train_xz = Dataset(data=dict_subjects_train, transform=transform_train_xz)
            self.ds_val = Dataset(data=dict_subjects_val, transform=self.transform_val)
            self.ds_test = Dataset(data=dict_subjects_test, transform=self.transform_test)

            recalc_ratios = False  # atm hardcoded
            if recalc_ratios:
                # Diagnostic: report what additive alpha the current labels would suggest.
                self.ds_train_dummy = Dataset(data=dict_subjects_train, transform=self.transform_test)
                additive_alpha = self._get_foreground_background_ratio(self.ds_train_dummy)
                print(f'Recalced additive alpha based on foreground / background ratio: {additive_alpha}')

            max_shape_train = self._get_max_shape_train(self.ds_train)
            max_shape_val = self._get_max_shape_val(self.ds_val)
            print(f'Amount of training samples: {len(self.ds_train)}, and validation samples: {len(self.ds_val)}.')
            print(f'Max shapes for train: {max_shape_train} and val: {max_shape_val}.')
            [print(f'Key: {k_}, Value: {type(v_)}') for k_, v_ in self.ds_train[0][0].items()]
            print(f'Using additive alpha: {self.conf.additive_alpha}.')

        # Assign test dataset for use in dataloader(s)
        elif stage == 'test':
            raise NotImplementedError()  # Atm. only one stage is used for all routines.
        else:
            raise ValueError(f'Stage {stage} is not available.')

    # For self-sup augmentation see: https://github.com/PyTorchLightning/Lightning-Bolts/blob/master/pl_bolts/models/self_supervised/simclr/transforms.py#L17-L91
    def train_dataloader(self):
        """Return the training dataloader(s).

        Uses weighted (optionally distributed) sampling; with orientation_swap
        additional zy/xz-oriented loaders sharing the same sampler are returned.
        """
        # See site-packages/pytorch_lightning/trainer for replace_sampler_ddp
        # Each drawn subject yields num_transforms * num_samples items, hence the division.
        num_samples = int(self.conf.num_samples_epoch / self.conf.num_transforms / self.conf.num_samples)
        print(f'Drawing {num_samples} of {len(self.ds_train)} training samples.')
        if self.conf.weighting:
            shuffle = False  # Exclusive with sampler - nonetheless shuffling can be added
            weights = torch.tensor(self.df_train['weights'].values)
            # num_samples = int(len(self.ds_train) * self.conf.sample_factor)  # Draw half of the dataset (at random), distributed sampler takes care of length for ddp (which splits the data)
            if self.conf.accelerator == 'ddp':
                # Note: neither torch and lightning support distributed versions of most samplers - this is the catalyst implementation  # TODO: Check if a fitting version is now available
                # Lightning takes care of set_epoch for proper shuffling
                sampler = DistributedSamplerWrapper(WeightedRandomSampler(weights=weights, num_samples=num_samples, replacement=self.conf.replacement), shuffle=True)
            else:
                sampler = WeightedRandomSampler(weights=weights, num_samples=num_samples, replacement=self.conf.replacement)  # should always shuffle (due to random draw)
        else:
            sampler = RandomSampler(self.ds_train, replacement=True, num_samples=num_samples)

        # The effective per-loader batch size must divide evenly.
        assert (self.conf.batch_size / self.conf.num_transforms / self.conf.num_samples).is_integer()
        loader_xy = DataLoader(self.ds_train,
                               batch_size=self.conf.batch_size // self.conf.num_transforms // self.conf.num_samples,
                               num_workers=self.conf.num_workers,
                               pin_memory=self.conf.pin_memory,
                               shuffle=False,  # Has to be False for given sampler
                               sampler=sampler,
                               # collate_fn=collate_list,
                               drop_last=True)
        loaders = [loader_xy]
        if self.conf.orientation_swap:
            loader_zy = DataLoader(self.ds_train_zy,
                                   batch_size=self.conf.batch_size // self.conf.num_transforms // self.conf.num_samples,
                                   num_workers=self.conf.num_workers,
                                   pin_memory=self.conf.pin_memory,
                                   shuffle=False,  # Has to be False for given sampler
                                   sampler=sampler,
                                   # collate_fn=collate_list,
                                   drop_last=True)
            loader_xz = DataLoader(self.ds_train_xz,
                                   batch_size=self.conf.batch_size // self.conf.num_transforms // self.conf.num_samples,
                                   num_workers=self.conf.num_workers,
                                   pin_memory=self.conf.pin_memory,
                                   shuffle=False,  # Has to be False for given sampler
                                   sampler=sampler,
                                   # collate_fn=collate_list,
                                   drop_last=True)
            loaders.extend([loader_zy, loader_xz])
        return loaders

    def val_dataloader(self):
        """Validation loader: one (full) subject per batch."""
        return DataLoader(self.ds_val,
                          batch_size=1,  # Note: Can be smaller than batch_size if len(ds_val) < batch_size.
                          num_workers=self.conf.num_workers,
                          pin_memory=self.conf.pin_memory)

    def test_dataloader(self):
        """Test loader: one subject per batch."""
        return DataLoader(self.ds_test,
                          batch_size=1,
                          num_workers=self.conf.num_workers,
                          pin_memory=self.conf.pin_memory)

    def predict_dataloader(self):
        """Prediction loader: same dataset and settings as test."""
        return DataLoader(self.ds_test,
                          batch_size=1,
                          num_workers=self.conf.num_workers,
                          pin_memory=self.conf.pin_memory)

    @staticmethod
    def add_data_specific_args(parent_parser):
        """Add data-loading CLI options plus the dataset-specific ones."""
        parser = ArgumentParser(parents=[parent_parser], add_help=False)
        parser.add_argument('--batch_size', default=16, type=int)
        parser.add_argument('--num_workers', default=8, type=int)
        parser.add_argument('--pin_memory', action='store_true')
        parser.add_argument('--replacement', default=True, type=bool)  # Use sampling with replacement if annotation is only sparsely available
        parser.add_argument('--weighting', default=True, type=bool)  # Custom weighting
        parser.add_argument('--sample_factor', default=1., type=float)  # Amount of samples drawn per epoch for weighted sampling
        parser.add_argument('--num_transforms', default=1, type=int)  # Amount of transforms applied to a selected (cropped) image. For > 1, this produces a "positive" pair. Keep it high-ish so crops find some overlapping regions
        parser.add_argument('--num_samples', default=2, type=int)  # Samples per subject
        parser.add_argument('--num_samples_epoch', default=5000, type=int)  # Amount of samples in an epoch
        parser.add_argument('--queue_max_length', default=36, type=int)
        parser.add_argument('--orientation_swap', default=False, type=bool)  # Randomly permute axes
        parser.add_argument('--max_subjects_train', default=-1, type=int)
        parser.add_argument('--dataset', default='tcia_btcv', type=str, choices=['tcia_btcv', 'ctorg'])

        hparams_tmp = parser.parse_known_args()[0]
        if hparams_tmp.dataset.lower() == 'tcia_btcv':
            parser = gather_tcia_btcv.add_data_specific_args(parser)
        elif hparams_tmp.dataset.lower() == 'ctorg':
            parser = gather_ctorg.add_data_specific_args(parser)
        else:
            raise NotImplementedError(f'The selected architecture {hparams_tmp.architecture} is not available.')
        return parser
| Python |
3D | marcdcfischer/PUNet | src/data/transforms_monai.py | .py | 17,243 | 247 | from typing import Optional, Tuple, List, Type
import monai.transforms as mtransforms
import itertools
import pathlib as plb
# See https://github.com/Project-MONAI/tutorials/blob/master/3d_segmentation/unetr_btcv_segmentation_3d_lightning.ipynb for a recent example
def generate_transforms(patch_size_students: List[Tuple[int, int, int]],
                        patch_size_teacher: Optional[Tuple[int, int, int]] = None,
                        content_keys: Optional[List[str]] = None,
                        aux_keys: Optional[List[str]] = None,
                        num_samples: int = 2,  # Different slice crops per volume
                        n_transforms: int = 1,  # Different transforms of cropped volumes
                        orientation: str = 'xy',
                        intensity_inversion: bool = False,
                        augmentation_mode: str = '2d',
                        shape_z: int = 1,
                        orientation_augmentation: bool = False):
    """Build the MONAI train / val transform pipelines for student-teacher training.

    For each subject volume, ``num_samples`` crops are drawn; each crop is copied for
    ``n_transforms`` independent augmentations and once more for the teacher branch.
    Additional student variants (one per entry in ``patch_size_students`` beyond the
    first) are copied from the first student's keys. After augmentation, all copies of
    a key are concatenated along the channel dim so the training loop receives a single
    tensor per (student/teacher, key).

    Args:
        patch_size_students: One ROI size per student branch; the first also serves as
            the teacher fallback.
        patch_size_teacher: Teacher ROI size; defaults to ``patch_size_students[0]``.
        content_keys: Data keys to load/augment; defaults to ``['image', 'label']``.
            Keys containing 'grid' are assumed to be pre-built tensors and skip loading.
        aux_keys: Auxiliary (non-image) keys that are copied alongside content keys but
            not augmented; defaults to no auxiliary keys.
        num_samples: Crops drawn per subject volume.
        n_transforms: Independent augmentations per crop (>1 yields "positive" pairs).
        orientation: 'xy' (native), 'zy' or 'xz' slice orientation via 90° rotation.
        intensity_inversion: If True, randomly invert student intensities.
        augmentation_mode: Currently unused here — presumably consumed by callers; TODO confirm.
        shape_z: Currently unused here — TODO confirm whether still needed.
        orientation_augmentation: Currently unused here — TODO confirm.

    Returns:
        Tuple of (train transform, val transform).
    """
    patch_size_teacher = patch_size_teacher if patch_size_teacher is not None else patch_size_students[0]
    if content_keys is None:
        content_keys = ['image', 'label']
    if aux_keys is None:  # Guard added: aux_keys was dereferenced below without a None check
        aux_keys = []
    # Original keys are considered student keys
    content_keys_trafos = [[key_ + f'_trafo{str(idx_trafo)}' for key_ in content_keys] for idx_trafo in range(1, n_transforms)]
    aux_keys_trafos = [[key_ + f'_trafo{str(idx_trafo)}' for key_ in aux_keys] for idx_trafo in range(1, n_transforms)]
    content_keys_all_student_first = [content_keys] + content_keys_trafos  # list of lists (so every sample can be transformed independently)
    aux_keys_all_student_first = [aux_keys] + aux_keys_trafos
    # Students extended keys (e.g. smaller variant)
    # Following scheme [idx_student][idx_trafo][key]
    n_students = len(patch_size_students)
    content_keys_students = [content_keys]
    content_keys_all_students = [content_keys_all_student_first]
    aux_keys_all_students = [aux_keys_all_student_first]
    for idx_student in range(0, n_students - 1):
        content_keys_students.append([key_ + f'_var{str(idx_student)}' for key_ in content_keys])  # Student content keys without trafo
        content_keys_all_students.append([[key_ + f'_var{str(idx_student)}' for key_ in content_keys_] for content_keys_ in content_keys_all_student_first])  # Student content keys with trafo
        aux_keys_all_students.append([[key_ + f'_var{str(idx_student)}' for key_ in aux_keys_] for aux_keys_ in aux_keys_all_student_first])
    # Teacher keys
    content_keys_teacher = [key_ + '_teacher' for key_ in content_keys]  # [key]
    content_keys_all_teacher = [[key_ + '_teacher' for key_ in content_keys_] for content_keys_ in content_keys_all_student_first]  # [idx_trafo][key]
    aux_keys_all_teacher = [[key_ + '_teacher' for key_ in aux_keys_] for aux_keys_ in aux_keys_all_student_first]  # [idx_trafo][key]
    # All keys
    content_keys_all = content_keys_all_students + [content_keys_all_teacher]  # [idx_student / teacher][idx_trafo][key]
    aux_keys_all = aux_keys_all_students + [aux_keys_all_teacher]  # [idx_student / teacher][idx_trafo][key]
    # Pre-processing
    transform_train = mtransforms.Compose([
        # Image loading, normalization and (sub-)selection
        mtransforms.LoadImaged(keys=[x_ for x_ in content_keys if 'grid' not in str(x_)]),  # Exclude tensors from loading
        mtransforms.EnsureChannelFirstd(keys=[x_ for x_ in content_keys if 'grid' not in str(x_)]),  # Grid already has a channel (and does not have metadata for a check)
        # mtransforms.NormalizeIntensityd(keys=[x_ for x_ in content_keys if 'image' in str(x_)])
        mtransforms.ScaleIntensityRanged(keys=[x_ for x_ in content_keys if 'image' in str(x_)], a_min=-1000, a_max=1000, b_min=-1, b_max=1, clip=True),  # CT only
    ])
    # Rotate orientation
    if orientation == 'xy':
        pass
    elif orientation == 'zy':
        transform_train = mtransforms.Compose([
            transform_train,
            mtransforms.Rotate90d(keys=content_keys, k=1, spatial_axes=(0, 2)),  # Rotate xz (as orientation augmentation) so resulting slices contain zy
        ])
    elif orientation == 'xz':
        transform_train = mtransforms.Compose([
            transform_train,
            mtransforms.Rotate90d(keys=content_keys, k=1, spatial_axes=(1, 2)),  # Rotate yz (as orientation augmentation) so resulting slices contain xz
        ])
    else:
        raise ValueError(f'Orientation {orientation} is not a valid choice.')
    # Generate samples
    transform_train = mtransforms.Compose([
        transform_train,
        mtransforms.RandSpatialCropSamplesd(keys=content_keys, roi_size=(patch_size_teacher[0], patch_size_teacher[1], patch_size_teacher[2]), random_center=True, random_size=False, num_samples=num_samples),  # Generates num_samples different slices
        # mtransforms.Resized(keys=content_keys, spatial_size=(256, 256, 1), mode=['nearest' if 'label' in key_ else 'trilinear' for key_ in content_keys]),  # Should usually be done in (offline) pre-processing
        mtransforms.CopyItemsd(keys=content_keys + aux_keys, times=n_transforms - 1, names=list(itertools.chain(*(content_keys_trafos + aux_keys_trafos)))) if n_transforms > 1 else mtransforms.Compose([]),  # Copies selected slices (and auxiliary info) for different augmentations
        mtransforms.CopyItemsd(keys=list(itertools.chain(*(content_keys_all_student_first + aux_keys_all_student_first))), times=1,
                               names=list(itertools.chain(*(content_keys_all_teacher + aux_keys_all_teacher)))),
    ])
    # Add further student samples
    for idx_student in range(1, n_students):
        transform_train = mtransforms.Compose([
            transform_train,
            mtransforms.CopyItemsd(keys=list(itertools.chain(*(content_keys_all_student_first + aux_keys_all_student_first))), times=1,
                                   names=list(itertools.chain(*(content_keys_all_students[idx_student] + aux_keys_all_students[idx_student])))),
        ])
    # Masking - see https://github.com/Project-MONAI/tutorials/tree/master/self_supervised_pretraining for more
    # Only applied to student.
    # dropout_holes = True -> replaces values inside region. dropout_holes = False -> replaces values outside region
    # Students (large and small) specific augmentations
    for idx_student in range(n_students):
        for idx_trafo in range(n_transforms):
            transform_train = mtransforms.Compose([
                transform_train,
                mtransforms.OneOf([
                    mtransforms.Compose([]),  # Dummy for applying nothing
                    mtransforms.RandCoarseDropoutd(keys=[x_ for x_ in content_keys_all_students[idx_student][idx_trafo] if 'image' in str(x_)], prob=1.0,
                                                   dropout_holes=True, holes=1, max_holes=3, spatial_size=5, max_spatial_size=20),  # spatial_size=20, max_spatial_size=40 used for a visualization example
                    mtransforms.RandCoarseDropoutd(keys=[x_ for x_ in content_keys_all_students[idx_student][idx_trafo] if 'image' in str(x_)], prob=1.0,
                                                   dropout_holes=False, holes=6, spatial_size=48, max_spatial_size=96),  # Holes are aggregated prior to outer fill.
                    mtransforms.RandCoarseShuffled(keys=[x_ for x_ in content_keys_all_students[idx_student][idx_trafo] if 'image' in str(x_)], prob=1.0,
                                                   holes=1, max_holes=3, spatial_size=5, max_spatial_size=20),
                ], weights=(0.7, 0.1, 0.1, 0.1))  # (0.0, 1.0, 0.0, 0.0) used for a visualization example
            ])
    # Augmentations on (pre-)crop
    # Note: Different samples (from CropSamples) are handled automatically, as they are internally stored in a list that is later collated.
    #       Only the ones generated by CopyItem need to be taken care off.
    # Students (large and small) are augmented differently
    for idx_student in range(n_students):
        for idx_trafo in range(n_transforms):
            transform_train = mtransforms.Compose([
                transform_train,
                mtransforms.RandBiasFieldd(keys=[x_ for x_ in content_keys_all_students[idx_student][idx_trafo] if 'image' in str(x_)], prob=0.1),
                mtransforms.RandStdShiftIntensityd(keys=[x_ for x_ in content_keys_all_students[idx_student][idx_trafo] if 'image' in str(x_)], prob=0.1, factors=(0.0, 0.25)),
                mtransforms.RandAdjustContrastd(keys=[x_ for x_ in content_keys_all_students[idx_student][idx_trafo] if 'image' in str(x_)], prob=0.1),
                mtransforms.RandScaleIntensityd(keys=[x_ for x_ in content_keys_all_students[idx_student][idx_trafo] if 'image' in str(x_)], prob=0.1, factors=-2.) if intensity_inversion else mtransforms.Compose([]),  # Invert image (v = v * (1 + factor))
                mtransforms.RandHistogramShiftd(keys=[x_ for x_ in content_keys_all_students[idx_student][idx_trafo] if 'image' in str(x_)], prob=0.1, num_control_points=(8, 12)),  # Shifts around image histogram
                # mtransforms.Rand3DElastic(keys=content_keys_transformed[idx_]),
                mtransforms.RandAffined(keys=content_keys_all_students[idx_student][idx_trafo],
                                        prob=0.8,
                                        rotate_range=(0., 0., 0.4),  # 2D rotate in radians
                                        shear_range=(0.025, 0., 0.025, 0., 0., 0.),  # 2D shear
                                        # translate_range=(0.25, 0.25, 0.0), already handled by random crop
                                        scale_range=((-0.25, 0.25), (-0.25, 0.25), (0., 0.)),  # 2D scaling. For some reason they add 1.0 internally ...
                                        mode=['nearest' if 'label' in key_ or 'pseudo' in key_ else 'bilinear' for key_ in content_keys_all_students[idx_student][idx_trafo]],
                                        padding_mode='reflection'),
            ])
    # Teacher
    for idx_trafo in range(n_transforms):
        transform_train = mtransforms.Compose([
            transform_train,
            mtransforms.RandBiasFieldd(keys=[x_ for x_ in content_keys_all_teacher[idx_trafo] if 'image' in str(x_)], prob=0.1),
            mtransforms.RandStdShiftIntensityd(keys=[x_ for x_ in content_keys_all_teacher[idx_trafo] if 'image' in str(x_)], prob=0.1, factors=(0.0, 0.1)),
            mtransforms.RandAdjustContrastd(keys=[x_ for x_ in content_keys_all_teacher[idx_trafo] if 'image' in str(x_)], prob=0.1),
            mtransforms.RandHistogramShiftd(keys=[x_ for x_ in content_keys_all_teacher[idx_trafo] if 'image' in str(x_)], prob=0.1, num_control_points=12),  # Fixed number of control points
            mtransforms.RandAffined(keys=content_keys_all_teacher[idx_trafo],
                                    prob=0.5,
                                    rotate_range=(0., 0., 0.4),
                                    shear_range=(0.025, 0., 0.025, 0., 0., 0.),
                                    scale_range=((-0.1, 0.1), (-0.1, 0.1), (0., 0.)),
                                    mode=['nearest' if 'label' in key_ or 'pseudo' in key_ else 'bilinear' for key_ in content_keys_all_teacher[idx_trafo]],
                                    padding_mode='reflection'),
        ])
    # Final conversion
    # Students
    for idx_student in range(n_students):
        for idx_trafo in range(n_transforms):
            transform_train = mtransforms.Compose([
                transform_train,
                mtransforms.RandSpatialCropd(keys=content_keys_all_students[idx_student][idx_trafo], roi_size=patch_size_students[idx_student], random_center=True, random_size=False),  # Crop center of augmented patch
                mtransforms.SpatialPadd(keys=content_keys_all_students[idx_student][idx_trafo], spatial_size=patch_size_students[idx_student], mode='reflect'),  # Needs to have at least this size. Reflect may lead to undesired (repetitive) patterns. Better options consistent with coords?
                mtransforms.ToTensord(keys=content_keys_all_students[idx_student][idx_trafo])
            ])
    # Teacher
    for idx_trafo in range(n_transforms):
        transform_train = mtransforms.Compose([
            transform_train,
            mtransforms.RandSpatialCropd(keys=content_keys_all_teacher[idx_trafo], roi_size=patch_size_teacher, random_center=True, random_size=False),  # Crop center of augmented patch
            mtransforms.SpatialPadd(keys=content_keys_all_teacher[idx_trafo], spatial_size=patch_size_teacher, mode='reflect'),  # Needs to have at least this size. Reflect may lead to undesired (repetitive) patterns. Better options consistent with coords?
            mtransforms.ToTensord(keys=content_keys_all_teacher[idx_trafo])
        ])
    # Join transformed elements (along channel dim) - so it doesn't need to perform within the training / validation routine
    # Students
    for idx_student in range(n_students):
        for key_, keys_student_ in zip(content_keys, content_keys_students[idx_student]):
            applied_student_keys_ = [x_ for x_ in list(itertools.chain(*content_keys_all_students[idx_student])) if key_ in str(x_)]
            if len(applied_student_keys_) > 0:
                transform_train = mtransforms.Compose([transform_train, mtransforms.ConcatItemsd(keys=applied_student_keys_, name=keys_student_)])
    # Teacher
    for key_, key_teacher_ in zip(content_keys, content_keys_teacher):
        applied_teacher_keys = [x_ for x_ in list(itertools.chain(*content_keys_all_teacher)) if key_ in str(x_)]
        if len(applied_teacher_keys) > 0:
            transform_train = mtransforms.Compose([transform_train, mtransforms.ConcatItemsd(keys=applied_teacher_keys, name=key_teacher_)])
    # Discard obsolete additional keys and meta data (of additional trafos and variants)
    transform_train = mtransforms.Compose([
        transform_train,
        mtransforms.SelectItemsd(keys=list(itertools.chain(*content_keys_students)) + content_keys_teacher + aux_keys)
    ])
    # Validation
    transform_val = mtransforms.Compose([
        mtransforms.LoadImaged(keys=[x_ for x_ in content_keys if 'grid' not in str(x_)]),
        mtransforms.EnsureChannelFirstd(keys=[x_ for x_ in content_keys if 'grid' not in str(x_)]),
        # mtransforms.NormalizeIntensityd(keys=[x_ for x_ in content_keys if 'image' in str(x_)]),
        mtransforms.ScaleIntensityRanged(keys=[x_ for x_ in content_keys if 'image' in str(x_)], a_min=-1000, a_max=1000, b_min=-1, b_max=1, clip=True),  # CT only
        # mtransforms.RandSpatialCropSamplesd(keys=content_keys, roi_size=patch_size_teacher, random_center=True, random_size=False, num_samples=10),
        # mtransforms.SpatialPadd(keys=content_keys, spatial_size=patch_size_teacher, mode='constant'),  # Needs to have at least this size. Reflect may lead to undesired (repetitive) patterns. Better options consistent with coords?
        mtransforms.ToTensord(keys=content_keys),
        mtransforms.SelectItemsd(keys=content_keys + aux_keys)  # Discard (currently) unused meta data
    ])
    return transform_train, transform_val
def generate_test_transforms(content_keys: Optional[List[str]] = None,
                             aux_keys: Optional[List[str]] = None):
    """Build the test-time pre-processing pipeline (load, channel-first, CT
    intensity scaling to [-1, 1], tensor conversion).

    Keys containing 'grid' are treated as pre-built tensors and skip loading.
    ``aux_keys`` is currently unused (meta data is kept for later inversion).
    """
    if content_keys is None:
        content_keys = ['image', 'label']
    loadable_keys = [k_ for k_ in content_keys if 'grid' not in str(k_)]
    image_keys = [k_ for k_ in content_keys if 'image' in str(k_)]
    transform_test = mtransforms.Compose([
        mtransforms.LoadImaged(keys=loadable_keys),
        mtransforms.EnsureChannelFirstd(keys=loadable_keys),
        mtransforms.ScaleIntensityRanged(keys=image_keys, a_min=-1000, a_max=1000, b_min=-1, b_max=1, clip=True),  # CT only
        mtransforms.ToTensord(keys=content_keys),
    ])
    return transform_test
def generate_test_post_transforms(output_dir: str,
                                  output_postfix: str,
                                  transform_test: mtransforms.InvertibleTransform,
                                  n_classes: Optional[int] = None):
    """Build the test-time post-processing chain: invert the test pre-processing
    on the prediction, discretize it (argmax + optional one-hot) and save it as
    a NIfTI file into ``output_dir``.
    """
    # Make sure the target directory exists before SaveImaged writes into it
    plb.Path(output_dir).mkdir(parents=True, exist_ok=True)
    invert_pred = mtransforms.Invertd(
        keys="pred",
        transform=transform_test,
        orig_keys="image",
        meta_keys="pred_meta_dict",
        orig_meta_keys="image_meta_dict",
        meta_key_postfix="meta_dict",
        nearest_interp=False,
        to_tensor=True,
    )
    save_pred = mtransforms.SaveImaged(keys="pred", meta_keys="image_meta_dict", output_dir=output_dir,
                                       output_postfix=output_postfix, resample=False)
    return mtransforms.Compose([
        mtransforms.EnsureTyped(keys="pred"),
        invert_pred,
        mtransforms.AsDiscreted(keys="pred", argmax=True, to_onehot=n_classes),
        save_pred,
    ])
| Python |
3D | marcdcfischer/PUNet | src/data/distributed_wrapper.py | .py | 2,876 | 86 | # Code has been directly copied from
# https://github.com/catalyst-team/catalyst/blob/master/catalyst/data/sampler.py
# https://github.com/catalyst-team/catalyst/blob/df9e07f962ca61986a31c602a4511ad983dad028/catalyst/data/dataset/torch.py#L13
import torch
from torch.utils.data import DistributedSampler, Dataset
from torch.utils.data.sampler import BatchSampler, Sampler
from typing import Iterator, List, Optional, Union
from operator import itemgetter
class DistributedSamplerWrapper(DistributedSampler):
    """
    Wrapper over `Sampler` for distributed training.
    Allows you to use any sampler in distributed mode.

    It is especially useful in conjunction with
    `torch.nn.parallel.DistributedDataParallel`. In such case, each
    process can pass a DistributedSamplerWrapper instance as a DataLoader
    sampler, and load a subset of subsampled data of the original dataset
    that is exclusive to it.

    .. note::
        Sampler is assumed to be of constant size.
    """

    def __init__(
        self,
        sampler,
        num_replicas: Optional[int] = None,
        rank: Optional[int] = None,
        shuffle: bool = True,
    ):
        """
        Args:
            sampler: Sampler used for subsampling
            num_replicas (int, optional): Number of processes participating in
                distributed training
            rank (int, optional): Rank of the current process
                within ``num_replicas``
            shuffle (bool, optional): If true (default),
                sampler will shuffle the indices
        """
        # The base DistributedSampler partitions *indices into the wrapped
        # sampler's output*, which is why the sampler is wrapped in a dataset.
        super(DistributedSamplerWrapper, self).__init__(
            DatasetFromSampler(sampler),
            num_replicas=num_replicas,
            rank=rank,
            shuffle=shuffle,
        )
        self.sampler = sampler

    def __iter__(self):
        """Iterate over this rank's share of the wrapped sampler's indices.

        Re-materializes the wrapped sampler each epoch so a stochastic sampler
        (e.g. weighted with replacement) yields a fresh draw per epoch, then
        maps the rank-local index subset (from DistributedSampler) back to the
        sampler's drawn indices.
        """
        self.dataset = DatasetFromSampler(self.sampler)
        indexes_of_indexes = super().__iter__()
        subsampler_indexes = self.dataset
        # NOTE(review): itemgetter(*idx) returns a bare value (not a tuple) when
        # the per-rank subset has exactly one element — presumably never the
        # case for the dataset sizes used here; confirm for tiny datasets.
        return iter(itemgetter(*indexes_of_indexes)(subsampler_indexes))
class DatasetFromSampler(Dataset):
    """Expose the indices produced by a `Sampler` as a map-style `Dataset`.

    The wrapped sampler is materialized lazily: it is only iterated (once)
    on the first `__getitem__` call, so constructing this dataset is cheap.

    Args:
        sampler: PyTorch sampler whose output indices become the items
    """

    def __init__(self, sampler: Sampler):
        """Store the sampler; defer materialization until first access."""
        self.sampler = sampler
        self.sampler_list = None

    def __getitem__(self, index: int):
        """Return the ``index``-th value drawn by the sampler.

        Args:
            index: position within the sampler's (cached) output sequence

        Returns:
            The sampler's value at that position
        """
        cached = self.sampler_list
        if cached is None:
            # First access: draw the full index sequence once and cache it
            cached = self.sampler_list = list(self.sampler)
        return cached[index]

    def __len__(self) -> int:
        """Return the number of items the wrapped sampler yields."""
        return len(self.sampler)
| Python |
3D | marcdcfischer/PUNet | src/data/datasets/__init__.py | .py | 0 | 0 | null | Python |
3D | marcdcfischer/PUNet | src/data/datasets/gather_tcia_btcv.py | .py | 5,085 | 98 | from typing import Union, Dict, Tuple
from argparse import ArgumentParser, Namespace
import pathlib as plb
import pandas as pd
import numpy as np
from typing import List
from src.data.datasets.gather_data import _generate_splits, _mask_domains
def generate_dataframes(conf: Union[Dict, Namespace]):
    """Assemble train/val/test dataframes for the TCIA/BTCV data.

    Gathers image/mask paths for the configured domains, splits them into
    train/val/test, and masks out the configured modality in the train split.
    """
    directories = _gather_data(dir_images=conf.dir_images,
                               dir_masks=conf.dir_masks,
                               domains=conf.domains)
    df_train, df_val, df_test = _generate_splits(directories,
                                                 num_annotated=conf.num_annotated,
                                                 domains=conf.domains,
                                                 max_subjects_train=conf.max_subjects_train)
    df_train = _mask_domains(df_train,
                             modality=conf.masked_modality,
                             valid_choices=conf.domains)
    return df_train, df_val, df_test
def _gather_data(dir_images: str,
                 dir_masks: str,
                 domains: List[str]):
    """Collect image and mask file paths for the requested domains.

    Args:
        dir_images: Root directory containing the 'processed_tcia' / 'processed_btcv' image folders.
        dir_masks: Root directory containing the corresponding label folders.
        domains: Any subset of ['tcia', 'btcv'] to gather.

    Returns:
        pd.DataFrame with columns names, frames, domains, images, masks,
        plus annotated (True) and weights (1.0) defaults.
    """
    data_dirs = dict()
    entries = ['names', 'frames', 'domains', 'images', 'masks']
    for key_ in entries:
        data_dirs[key_] = list()
    if 'tcia' in domains:
        paths_images_tcia = sorted((plb.Path(dir_images) / 'processed_tcia').rglob('PANCREAS*.nii.gz'))
        paths_masks_tcia = [x_ for x_ in sorted((plb.Path(dir_masks) / 'processed_tcia').rglob('label*.nii.gz')) if '_pseudo' not in x_.name]
        data_dirs['names'].extend([x_.name.split('.')[0] + '_tcia' for x_ in paths_images_tcia])
        data_dirs['frames'].extend([x_.name.split('.')[0] + '_tcia' for x_ in paths_images_tcia])
        data_dirs['domains'].extend(['tcia' for _ in paths_images_tcia])
        data_dirs['images'].extend(paths_images_tcia)
        data_dirs['masks'].extend(paths_masks_tcia)
    if 'btcv' in domains:
        paths_images_btcv = sorted((plb.Path(dir_images) / 'processed_btcv').rglob('img*.nii.gz'))
        paths_masks_btcv = [x_ for x_ in sorted((plb.Path(dir_masks) / 'processed_btcv').rglob('label*.nii.gz')) if '_pseudo' not in x_.name]
        data_dirs['names'].extend([x_.name.split('.')[0] + '_btcv' for x_ in paths_images_btcv])
        # Fix: frames were previously derived from the mask paths ('label*'), inconsistent
        # with the tcia branch above (and gather_ctorg), which derive frames from images.
        data_dirs['frames'].extend([x_.name.split('.')[0] + '_btcv' for x_ in paths_images_btcv])
        data_dirs['domains'].extend(['btcv' for _ in paths_images_btcv])
        data_dirs['images'].extend(paths_images_btcv)
        data_dirs['masks'].extend(paths_masks_btcv)
    debug = False
    if debug:
        import matplotlib
        import nibabel as nib
        matplotlib.use('tkagg')
        viewer = nib.viewers.OrthoSlicer3D(np.array(np.stack([nib.load(paths_images_tcia[0]).get_fdata(),
                                                              nib.load(paths_masks_tcia[0]).get_fdata()], axis=-1)))
        viewer.show()
        viewer = nib.viewers.OrthoSlicer3D(np.array(np.stack([nib.load(paths_images_btcv[0]).get_fdata(),
                                                              nib.load(paths_masks_btcv[0]).get_fdata()], axis=-1)))
        viewer.show()
    df_dirs = pd.DataFrame(data_dirs)
    df_dirs = df_dirs.assign(annotated=True)
    df_dirs = df_dirs.assign(weights=1.0)  # Default weight is 1.0
    return df_dirs
def add_data_specific_args(parent_parser):
    """Add TCIA / BTCV dataset-specific CLI arguments to ``parent_parser``.

    Returns a new ArgumentParser that inherits the parent's arguments.
    Defaults are unchanged; list-valued options now declare ``nargs='+'`` so
    CLI-supplied values parse as lists, consistent with their list defaults.
    """
    parser = ArgumentParser(parents=[parent_parser], add_help=False)
    parser.add_argument('--dir_images', default='/mnt/SSD_SATA_03/data_med/prompting/', type=str)  # EDIT ME
    parser.add_argument('--dir_masks', default='/mnt/SSD_SATA_03/data_med/prompting/', type=str)  # EDIT ME
    parser.add_argument('--n_students', default=2, type=int)
    parser.add_argument('--patch_size_students', default="224,224,1; 160,160,1", type=list_of_tupels)
    parser.add_argument('--patch_size_teacher', default=[256, 256, 1], nargs=3, type=int)
    parser.add_argument('--in_channels', default=1, type=int)
    parser.add_argument('--out_channels', default=9, type=int)
    parser.add_argument('--out_channels_pseudo', default=51, type=int)
    parser.add_argument('--masked_modality', default='', type=str, choices=['tcia', 'btcv'])
    # nargs='+' added: the default is a list, so command-line values must parse as a list too
    parser.add_argument('--domains', default=['tcia', 'btcv'], nargs='+', type=str)  # Present domains. Used e.g. for domain-wise prototypes
    parser.add_argument('--num_annotated', default=-1, type=int)  # Determines amount of annotated subjects available during training. 3 ~ 10%, 6 ~ 20%, 15 ~ 50%.
    # nargs='+' added: previously a single CLI value parsed as a scalar float, inconsistent with the list default
    parser.add_argument('--additive_alpha', default=[0., 0.07590447, 0.13365165, 0.85956404, 1.67616135, 0.01332489, 0.05445146, 0.29287293, 0.25115765], nargs='+', type=float)  # Additive alpha value based on foreground / background ratio. 0 for background.
    parser.add_argument('--additive_alpha_factor', default=0.001, type=float)  # Factor to compress the range of minimal (0.) and maximal additive alpha value
    return parser
def list_of_tupels(args):
    """Parse a semicolon-separated list of comma-separated ints.

    Example: "224,224,1; 160,160,1" -> [(224, 224, 1), (160, 160, 1)].
    Whitespace is ignored.
    """
    groups = args.replace(' ', '').split(';')
    return [tuple(int(value) for value in group.split(',')) for group in groups]
| Python |
3D | marcdcfischer/PUNet | src/data/datasets/prepare_tcia.py | .py | 5,147 | 108 | import pathlib as plb
import re
import csv
import nibabel as nib
import numpy as np
from sklearn import preprocessing
from src.data.datasets.pre_processing import _rescale
from typing import Tuple
def _process(path_img,
             path_lbl,
             coords,
             dir_out,
             classes: Tuple[int, ...] = (0, 1, 3, 4, 5, 6, 7, 11, 14),
             view: bool = False):
    """Crop a TCIA image/label pair to the CSV-given extent, remap the kept
    label ids to contiguous indices, and save both volumes to ``dir_out``.

    Args:
        path_img: Path to the image NIfTI (.nii); saved with '.gz' appended.
        path_lbl: Path to the label NIfTI.
        coords: Six crop extents [left, right, ant, post, inf, sup];
            ant/post are measured from the flipped y-axis (see crop below).
        dir_out: Output directory (must exist).
        classes: Original label ids to keep, in ascending order; everything
            else is set to background (0) before re-encoding to 0..len-1.
        view: If True, open an OrthoSlicer3D viewer at each stage.
    """
    nii_image, nii_lbl = nib.load(path_img), nib.load(path_lbl)
    np_image = nii_image.get_fdata(dtype=np.float32)
    # Labels are flipped along axis 2 — presumably the TCIA label files are
    # stored with inverted z relative to the images; TODO confirm.
    np_label = np.flip(nii_lbl.get_fdata(dtype=np.float32).round().astype(np.int32), axis=2)
    assert np_image.shape == np_label.shape
    if view:
        viewer = nib.viewers.OrthoSlicer3D(np.stack([np_image / 255. * 10., np_label], axis=-1))
        viewer.show()
    # Crop (according to given CSV) +- x
    # A safety margin is added around the CSV extents, clamped to the volume bounds.
    additional_space = 24
    additional_space_z = 3
    # y-extents (coords[2:4]) are interpreted from the far end of axis 1 (shape[1] - coord).
    np_image = np_image[max(coords[0] - additional_space, 0):min(coords[1] + additional_space, np_image.shape[0]),
                        max(np_image.shape[1] - coords[3] - additional_space, 0):min(np_image.shape[1] - coords[2] + additional_space, np_image.shape[1]),
                        max(coords[4] - additional_space_z, 0):min(coords[5] + additional_space_z, np_image.shape[2])]
    np_label = np_label[max(coords[0] - additional_space, 0):min(coords[1] + additional_space, np_label.shape[0]),
                        max(np_label.shape[1] - coords[3] - additional_space, 0):min(np_label.shape[1] - coords[2] + additional_space, np_label.shape[1]),
                        max(coords[4] - additional_space_z, 0):min(coords[5] + additional_space_z, np_label.shape[2])]
    if view:
        viewer = nib.viewers.OrthoSlicer3D(np.stack([np_image / 255. * 10., np_label], axis=-1))
        viewer.show()
    # Label encoding
    print(f'Selecting labels {classes} out of {np.unique(np_label)}.')
    le = preprocessing.LabelEncoder()
    # Classes: 1. Spleen, 3. L. Kidney, 4. Gallbladder, 5. Esophagus, 6. Liver, 7. Stomach, 11. Pancreas, 14. Duodenum
    # Groups: 1. Spleen, L. Kidney, Gallbladder, Liver, 2. Esophagus, Stomach, Pancreas, Duodenum
    # LabelEncoder is used "in reverse": classes_ is set directly so transform()
    # maps the kept original ids onto 0..len(classes)-1 (requires ascending order).
    le.classes_ = classes
    # Zero out every voxel whose label is not in the kept set before re-encoding.
    mask = np.full_like(np_label, fill_value=1).astype(bool)
    for class_ in classes:
        mask = np.logical_and(mask, np_label != class_)
    np_label[mask] = 0
    np_label = le.transform(np_label.reshape(-1)).reshape(np_label.shape)
    if view:
        viewer = nib.viewers.OrthoSlicer3D(np.stack([np_image / 255. * 10., np_label], axis=-1))
        viewer.show()
    # Save with the original affine/header; the image gains a '.gz' suffix
    # (source images are .nii, labels are already .nii.gz).
    nii_image_converted = nib.Nifti1Image(np_image, nii_image.affine, nii_image.header)
    nii_lbl_converted = nib.Nifti1Image(np_label, nii_lbl.affine, nii_lbl.header)
    nib.save(nii_image_converted, plb.Path(dir_out) / (path_img.name + '.gz'))
    nib.save(nii_lbl_converted, plb.Path(dir_out) / path_lbl.name)
if __name__ == '__main__':
    # Pre-process the TCIA multi-organ dataset: crop each volume to the extents
    # listed in cropping.csv, remap labels, then rescale to a fixed spacing.
    # data from https://zenodo.org/record/1169361#.YqhgFXhBxhH
    dir_data = '/mnt/SSD_SATA_03/data_med/prompting/multi-organ/'
    dir_out = '/mnt/SSD_SATA_03/data_med/prompting/processed_tcia_fixed_spacing/'
    plb.Path(dir_out).mkdir(parents=True, exist_ok=True)
    target_resolution = (1.25, 1.25, 2.5)  # Target voxel spacing in mm (x, y, z)
    target_size = (280, 280, -1)  # Target in-plane size; -1 keeps the z extent
    classes = (0, 1, 3, 4, 5, 6, 7, 11, 14)  # Label ids kept by _process
    view = False  # Open interactive viewers during processing
    overwrite = True  # Re-run cropping/encoding even if outputs exist
    # Preprocess data - cropping via csv
    # Map 4-digit TCIA case id -> six crop extents from cropping.csv.
    crop_coords = dict()
    with open(plb.Path(dir_data) / 'cropping.csv') as file_:
        reader = csv.DictReader(file_)
        for row in reader:
            if row['publisher'] == 'TCIA':
                crop_coords[row['original_id'].zfill(4)] = [int(x_) for x_ in [row['extent_left'], row['extent_right'],
                                                                               row['extent_ant'], row['extent_post'],
                                                                               row['extent_inf'], row['extent_sup']]]  # coords reordered due to wrong entries
    paths_imgs = sorted(list((plb.Path(dir_data) / 'image_tcia_multiorgan').glob('*.nii')))
    paths_lbls = sorted(list((plb.Path(dir_data) / 'label_tcia_multiorgan').glob('*.nii.gz')))
    assert len(paths_imgs) == len(paths_lbls)
    # Pair images and labels via their 4-digit case numbers; sets must match.
    img_numbers = [re.search('\d{4}', str(x_.name)).group(0) for x_ in paths_imgs]
    lbl_numbers = [re.search('\d{4}', str(x_.name)).group(0) for x_ in paths_lbls]
    assert set(img_numbers) == set(lbl_numbers)
    for path_img_, path_lbl_ in zip(paths_imgs, paths_lbls):
        print(f'--- Processing {path_img_.name} ---')
        id_ = re.search('\d{4}', str(path_img_.name)).group(0)
        coords_ = crop_coords[id_]
        if overwrite:
            # Crop + label re-encode; writes <name>.nii.gz and the label file to dir_out.
            _process(path_img=path_img_,
                     path_lbl=path_lbl_,
                     coords=coords_,
                     dir_out=dir_out,
                     classes=classes,
                     view=view)
        # Resample the just-written outputs to the fixed target spacing/size.
        _rescale(path_images=[str(plb.Path(dir_out) / (path_img_.name + '.gz'))],
                 path_mask=str(plb.Path(dir_out) / path_lbl_.name),
                 target_resolution=target_resolution,
                 target_size=target_size,
                 view=view)
| Python |
3D | marcdcfischer/PUNet | src/data/datasets/gather_ctorg.py | .py | 4,237 | 86 | from typing import Union, Dict, Tuple
from argparse import ArgumentParser, Namespace
import pathlib as plb
import pandas as pd
import numpy as np
from typing import List
from src.data.datasets.gather_data import _generate_splits, _mask_domains
def generate_dataframes(conf: Union[Dict, Namespace]):
    """Build train/val/test dataframes for the CT-ORG dataset.

    Gathers image/mask paths, splits into train/val/test and masks the
    configured modality in the training split.
    """
    gathered = _gather_data(dir_images=conf.dir_images,
                            dir_masks=conf.dir_masks,
                            domains=conf.domains)
    splits = _generate_splits(gathered,
                              num_annotated=conf.num_annotated,
                              domains=conf.domains,
                              max_subjects_train=conf.max_subjects_train)
    df_train, df_val, df_test = splits
    df_train = _mask_domains(df_train,
                             modality=conf.masked_modality,
                             valid_choices=conf.domains)
    return df_train, df_val, df_test
def _gather_data(dir_images: str,
                 dir_masks: str,
                 domains: List[str]):
    """Collect CT-ORG image/mask file paths into a dataframe.

    Args:
        dir_images: Root directory containing the 'processed_ctorg' image folder.
        dir_masks: Root directory containing the corresponding label folder.
        domains: Gathering happens only if 'ctorg' is contained here.

    Returns:
        pd.DataFrame with columns names, frames, domains, images, masks,
        plus annotated (True) and weights (1.0) defaults.
    """
    entries = ['names', 'frames', 'domains', 'images', 'masks']
    data_dirs = {key_: list() for key_ in entries}
    if 'ctorg' in domains:
        paths_images_ctorg = sorted((plb.Path(dir_images) / 'processed_ctorg').rglob('volume*.nii.gz'))
        paths_masks_ctorg = sorted((plb.Path(dir_masks) / 'processed_ctorg').rglob('labels*.nii.gz'))
        stems = [p_.name.split('.')[0] + '_ctorg' for p_ in paths_images_ctorg]
        data_dirs['names'].extend(stems)
        data_dirs['frames'].extend(stems)
        data_dirs['domains'].extend(['ctorg'] * len(paths_images_ctorg))
        data_dirs['images'].extend(paths_images_ctorg)
        data_dirs['masks'].extend(paths_masks_ctorg)
    debug = False
    if debug:
        import matplotlib
        import nibabel as nib
        matplotlib.use('tkagg')
        viewer = nib.viewers.OrthoSlicer3D(np.array(np.stack([nib.load(paths_images_ctorg[0]).get_fdata() / 255.,
                                                              nib.load(paths_masks_ctorg[0]).get_fdata()], axis=-1)))
        viewer.show()
    df_dirs = pd.DataFrame(data_dirs)
    df_dirs = df_dirs.assign(annotated=True)
    df_dirs = df_dirs.assign(weights=1.0)  # Default weight is 1.0
    return df_dirs
def add_data_specific_args(parent_parser):
    """Add CT-ORG dataset-specific CLI arguments to ``parent_parser``.

    Returns a new ArgumentParser that inherits the parent's arguments.
    Defaults are unchanged; list-valued options now declare ``nargs='+'`` so
    CLI-supplied values parse as lists, consistent with their list defaults.
    """
    parser = ArgumentParser(parents=[parent_parser], add_help=False)
    parser.add_argument('--dir_images', default='/mnt/SSD_SATA_03/data_med/prompting/', type=str)  # EDIT ME
    parser.add_argument('--dir_masks', default='/mnt/SSD_SATA_03/data_med/prompting/', type=str)  # EDIT ME
    # parser.add_argument('--dir_scribbles', default='/mnt/SSD_SATA_03/data_med/scribbles/acdc_scribbles_2020_fixed', type=str)
    # parser.add_argument('--image_size', default=[64, 64, 48], nargs=3, type=int)
    parser.add_argument('--n_students', default=2, type=int)
    parser.add_argument('--patch_size_students', default="224,224,1; 160,160,1", type=list_of_tupels)
    parser.add_argument('--patch_size_teacher', default=[256, 256, 1], nargs=3, type=int)
    parser.add_argument('--in_channels', default=1, type=int)
    parser.add_argument('--out_channels', default=6, type=int)
    parser.add_argument('--masked_modality', default='', type=str, choices=['ctorg'])
    # nargs='+' added: the default is a list, so command-line values must parse as a list too
    parser.add_argument('--domains', default=['ctorg'], nargs='+', type=str)  # Present domains. Used e.g. for domain-wise prototypes
    parser.add_argument('--num_annotated', default=-1, type=int)  # Determines amount of annotated subjects available during training. 10 ~ 10%, 19 ~ 20%, 48 ~ 50%.
    # nargs='+' added: previously a single CLI value parsed as a scalar float, inconsistent with the list default
    parser.add_argument('--additive_alpha', default=[0., 0.06171474, 0.74141834, 0.16770616, 0.00349171, 0.20303888], nargs='+', type=float)  # Additive alpha value based on foreground / background ratio. 0 for background.
    parser.add_argument('--additive_alpha_factor', default=0.01, type=float)  # Factor to compress the range of minimal (0.) and maximal additive alpha value
    return parser
def list_of_tupels(args):
    """Convert "a,b,c; d,e,f" into [(a, b, c), (d, e, f)] with int entries.

    Whitespace anywhere in the string is ignored.
    """
    parsed = list()
    for chunk in args.replace(' ', '').split(';'):
        parsed.append(tuple(map(int, chunk.split(','))))
    return parsed
| Python |
3D | marcdcfischer/PUNet | src/data/datasets/prepare_btcv.py | .py | 5,370 | 111 | import pathlib as plb
import re
import csv
import nibabel as nib
import numpy as np
from sklearn import preprocessing
from src.data.datasets.pre_processing import _rescale
from typing import Tuple
def _process(path_img,
             path_lbl,
             coords,
             dir_out,
             classes: Tuple[int, ...] = (0, 1, 3, 4, 5, 6, 7, 11, 14),
             view: bool = False):
    """Crop a BTCV image/label pair to the csv-provided extent, re-encode the
    kept label classes to a contiguous range and save both volumes to dir_out.

    Args:
        path_img: Path to the image nifti.
        path_lbl: Path to the label nifti (must match the image shape).
        coords: Cropping extents [left, right, ant, post, inf, sup] from the csv.
        dir_out: Output directory; input file names are reused.
        classes: Label values to keep; every other voxel becomes background (0).
        view: If True, open an interactive viewer after each processing stage.
    """
    nii_image, nii_lbl = nib.load(path_img), nib.load(path_lbl)
    np_image = nii_image.get_fdata(dtype=np.float32)
    np_label = nii_lbl.get_fdata(dtype=np.float32).round().astype(np.int32)
    assert np_image.shape == np_label.shape

    if view:
        viewer = nib.viewers.OrthoSlicer3D(np.stack([np_image / 255. * 10., np_label], axis=-1))
        viewer.show()

    # Crop (according to given CSV) +- a safety margin. The ant/post extents are
    # counted from the opposite end, hence the shape[1] - coords terms.
    print(f'Cropping image of size {np_image.shape}')
    additional_space = 24
    additional_space_z = 3
    np_image = np_image[max(coords[0] - additional_space, 0):min(coords[1] + additional_space, np_image.shape[0]),
                        max(np_image.shape[1] - coords[3] - additional_space, 0):min(np_image.shape[1] - coords[2] + additional_space, np_image.shape[1]),
                        max(coords[4] - additional_space_z, 0):min(coords[5] + additional_space_z, np_image.shape[2])]
    np_label = np_label[max(coords[0] - additional_space, 0):min(coords[1] + additional_space, np_label.shape[0]),
                        max(np_label.shape[1] - coords[3] - additional_space, 0):min(np_label.shape[1] - coords[2] + additional_space, np_label.shape[1]),
                        max(coords[4] - additional_space_z, 0):min(coords[5] + additional_space_z, np_label.shape[2])]
    print(f'Cropped image to size {np_image.shape}')

    if view:
        viewer = nib.viewers.OrthoSlicer3D(np.stack([np_image / 255. * 10., np_label], axis=-1))
        viewer.show()

    # Label encoding
    print(f'Selecting labels {classes} out of {np.unique(np_label)}.')
    le = preprocessing.LabelEncoder()
    # Classes: 1. Spleen, 2. R. Kidney, 3. L. Kidney, 4. Gallbladder, 5. Esophagus, 6. Liver, 7. Stomach,
    # 8. Aorta, 9. Inf. Vena Cava, 10. Portal Vein and Splenic Vein, 11. Pancreas, 12. Right Adrenal Gland, 13. Left Adrenal Gland, 14. Duodenum
    # Groups: 1. Spleen, L. Kidney, Gallbladder, Liver, 2. Esophagus, Stomach, Pancreas, Duodenum
    le.classes_ = classes
    # Zero out every voxel whose label is not among the selected classes
    # (vectorized replacement of the previous per-class masking loop).
    np_label[~np.isin(np_label, classes)] = 0
    np_label = le.transform(np_label.reshape(-1)).reshape(np_label.shape)

    if view:
        viewer = nib.viewers.OrthoSlicer3D(np.stack([np_image / 255. * 10., np_label], axis=-1))
        viewer.show()

    nii_image_converted = nib.Nifti1Image(np_image, nii_image.affine, nii_image.header)
    nii_lbl_converted = nib.Nifti1Image(np_label, nii_lbl.affine, nii_lbl.header)
    nib.save(nii_image_converted, plb.Path(dir_out) / path_img.name)
    nib.save(nii_lbl_converted, plb.Path(dir_out) / path_lbl.name)
if __name__ == '__main__':
    # data from https://zenodo.org/record/1169361#.YqhgFXhBxhH
    dir_data = '/mnt/SSD_SATA_03/data_med/prompting/multi-organ/'
    dir_out = '/mnt/SSD_SATA_03/data_med/prompting/processed_btcv_fixed_spacing/'
    plb.Path(dir_out).mkdir(parents=True, exist_ok=True)
    target_resolution = (1.25, 1.25, 2.5)
    target_size = (280, 280, -1)
    classes = (0, 1, 3, 4, 5, 6, 7, 11, 14)
    view = False
    overwrite = True

    # Preprocess data - cropping via csv
    crop_coords = dict()
    with open(plb.Path(dir_data) / 'cropping.csv') as file_:
        reader = csv.DictReader(file_)
        for row in reader:
            if row['publisher'] == 'Synapse':
                crop_coords[row['original_id'].zfill(4)] = [int(x_) for x_ in [row['extent_left'], row['extent_right'],
                                                                               row['extent_ant'], row['extent_post'],
                                                                               row['extent_inf'], row['extent_sup']]]  # coords reordered due to wrong entries

    paths_imgs = sorted(list((plb.Path(dir_data) / 'image_btcv_multiorgan').glob('*.nii.gz')))
    paths_lbls = sorted(list((plb.Path(dir_data) / 'label_btcv_multiorgan').glob('[!LAS]*.nii.gz')))
    assert len(paths_imgs) == len(paths_lbls)
    # Raw strings for the regexes - '\d' in a plain literal is an invalid escape
    # sequence (DeprecationWarning today, a SyntaxError in future Python versions).
    img_numbers = [re.search(r'\d{4}', str(x_.name)).group(0) for x_ in paths_imgs]
    lbl_numbers = [re.search(r'\d{4}', str(x_.name)).group(0) for x_ in paths_lbls]
    assert set(img_numbers) == set(lbl_numbers)

    for path_img_, path_lbl_ in zip(paths_imgs, paths_lbls):
        print(f'--- Processing {path_img_.name} ---')
        id_ = re.search(r'\d{4}', str(path_img_.name)).group(0)
        coords_ = crop_coords[id_]
        if overwrite:
            _process(path_img=path_img_,
                     path_lbl=path_lbl_,
                     coords=coords_,
                     dir_out=dir_out,
                     classes=classes,
                     view=view)
            _rescale(path_images=[str(plb.Path(dir_out) / path_img_.name)],
                     path_mask=str(plb.Path(dir_out) / path_lbl_.name),
                     target_resolution=target_resolution,
                     target_size=target_size,
                     view=view)
| Python |
3D | marcdcfischer/PUNet | src/data/datasets/pre_processing.py | .py | 8,278 | 170 | import dicom2nifti
import nibabel as nib
from monai import transforms as mtransforms
from nibabel.orientations import ornt_transform, axcodes2ornt, inv_ornt_aff, apply_orientation, io_orientation, aff2axcodes
from typing import Tuple, Optional, List
import numpy as np
import pathlib as plb
from skimage import transform as stransform
def reorient_nii(nib_image: nib.Nifti1Image,
                 affine: Optional[np.ndarray] = None,
                 axes_code: Tuple[str, ...] = ('L', 'A', 'S'),  # expects w h d format (not like torch, which uses h w d)
                 verbose: bool = True):
    """Reorient a nifti image to the orientation given by ``axes_code``.

    Args:
        nib_image: Loaded nifti image to reorient.
        affine: Optional affine to use instead of the image's own
            (e.g. to overwrite a disparate affine stored in a mask).
        axes_code: Target orientation axes codes, e.g. ('L', 'A', 'S').
        verbose: If True, print the orientation and affine change.

    Returns:
        A new ``nib.Nifti1Image`` with reoriented data and adjusted affine.
    """
    volume_old = nib_image.get_fdata()
    affine_old = affine if affine is not None else nib_image.affine  # Use passed affine (e.g. to overwrite disparate affine in mask)
    target_orientation = axcodes2ornt(axes_code)
    # Orientation transform from the current on-disk orientation to the target.
    ornt_trans = ornt_transform(io_orientation(affine_old), target_orientation)
    affine_trans = inv_ornt_aff(ornt_trans, volume_old.shape)
    affine_new = np.dot(affine_old, affine_trans)
    volume_new = apply_orientation(volume_old, ornt_trans)
    if verbose:
        print(f'Orientation change: {aff2axcodes(affine_old)} -> {aff2axcodes(affine_new)}')
        print(f'Affine change:\n{affine_old}\n->\n{affine_new}')
    nib_image_new = nib.Nifti1Image(volume_new, affine_new, nib_image.header)
    return nib_image_new
def reorient_acdc(dir_images: str,
                  dir_masks: str,
                  dir_scribbles: str,
                  dir_images_out: str,
                  dir_masks_out: str,
                  dir_scribbles_out: str):
    """Reorient all ACDC images, masks and scribbles to a common orientation.

    The image affine of each subject serves as the single reference, so image,
    mask and scribbles stay aligned even if their stored affines disagree.

    Args:
        dir_images: Root directory containing image niftis (``*[0-9].nii.gz``).
        dir_masks: Root directory containing ground-truth masks (``*_gt.nii.gz``).
        dir_scribbles: Root directory containing scribbles (``*_scribble.nii.gz``).
        dir_images_out: Output root for reoriented images.
        dir_masks_out: Output root for reoriented masks.
        dir_scribbles_out: Output root for reoriented scribbles.
    """
    path_images = sorted(plb.Path(dir_images).rglob('*[0-9].nii.gz'))
    path_masks = sorted(plb.Path(dir_masks).rglob('*_gt.nii.gz'))
    path_scribbles = sorted(plb.Path(dir_scribbles).rglob('*_scribble.nii.gz'))
    assert len(path_images) == len(path_masks) and len(path_images) == len(path_scribbles)

    # Plain loop instead of a side-effect list comprehension (idiom fix).
    for dir_out_ in (dir_images_out, dir_masks_out, dir_scribbles_out):
        plb.Path(dir_out_).mkdir(parents=True, exist_ok=True)

    for path_img_, path_mask_, path_scribbles_ in zip(path_images, path_masks, path_scribbles):
        print(f'Fixing subject {path_img_.name} with mask {path_mask_.name} and scribbles {path_scribbles_.name}.')
        nib_image_ = nib.load(path_img_)
        nib_mask_ = nib.load(path_mask_)
        nib_scribbles = nib.load(path_scribbles_)

        # Reorient everything with the image affine as the common reference.
        common_affine = nib_image_.affine
        nib_image_new = reorient_nii(nib_image=nib_image_, affine=common_affine)
        nib_mask_new = reorient_nii(nib_image=nib_mask_, affine=common_affine)
        nib_scribbles_new = reorient_nii(nib_image=nib_scribbles, affine=common_affine)

        # Mirror the input directory structure below the output roots.
        path_out_image_ = plb.Path(dir_images_out) / path_img_.relative_to(plb.Path(dir_images))
        path_out_mask_ = plb.Path(dir_masks_out) / path_mask_.relative_to(plb.Path(dir_masks))
        path_out_scribbles_ = plb.Path(dir_scribbles_out) / path_scribbles_.relative_to(plb.Path(dir_scribbles))
        nib.save(nib_image_new, path_out_image_)
        nib.save(nib_mask_new, path_out_mask_)
        nib.save(nib_scribbles_new, path_out_scribbles_)
def _convert_dcm_to_nifti(dir_in: str,
                          path_out: str):
    """Convert a DICOM series directory into a single nifti file.

    Args:
        dir_in: Directory containing the DICOM series.
        path_out: Target path of the resulting nifti file.
    """
    print(f'Converting {dir_in} to {path_out}.')
    # reorient_nifti=False keeps the original on-disk orientation untouched.
    dicom2nifti.dicom_series_to_nifti(dir_in, path_out, reorient_nifti=False)
def _rescale(path_images: List[str],
             path_mask: str = '',
             path_scribbles: str = '',
             target_resolution: Tuple[float, ...] = (1.0, 1.0, 1.0),
             target_size: Tuple[int, ...] = (256, 256, -1),
             rescale_z: bool = True,
             view: bool = False):
    """Rescale image(s) (plus optional mask and scribbles) to a target resolution
    and pad/crop them to a target size, overwriting the files on disk.

    Args:
        path_images: Paths of the image niftis to rescale in place.
        path_mask: Optional path of a label mask (nearest-neighbor interpolation).
        path_scribbles: Optional path of a scribbles volume (nearest-neighbor).
        target_resolution: Desired voxel spacing per axis.
        target_size: Desired spatial size after pad/crop (-1 keeps an axis).
        rescale_z: If False, the z axis keeps its original spacing.
        view: If True, open interactive viewers during processing.
    """
    nii_images = [nib.load(x_) for x_ in path_images]
    np_images = [x_.get_fdata() for x_ in nii_images]
    np_mask = None
    if path_mask:
        nii_mask = nib.load(path_mask)
        np_mask = nii_mask.get_fdata(dtype=np.float32).round().astype(np.int32)
    np_scribbles = None
    if path_scribbles:
        nii_scribbles = nib.load(path_scribbles)
        np_scribbles = nii_scribbles.get_fdata()
    if view:
        viewer = nib.viewers.OrthoSlicer3D(np.stack([np_images[0] / 255. * 10, np_mask], axis=-1))
        viewer.show()

    # Scale factors derived from the stored voxel spacing of the first image.
    pix_dim = nii_images[0].header['pixdim'][1:4]
    scale = [x_ / y_ for x_, y_ in zip(pix_dim, target_resolution)]
    scale[2] = 1. if not rescale_z else scale[2]
    # Copy before adjusting - nib's .affine returns the image's internal array,
    # so mutating it directly would silently alter the loaded image's affine.
    new_affine = nii_images[0].affine.copy()
    new_affine[0, 0] = new_affine[0, 0] / scale[0]
    new_affine[1, 1] = new_affine[1, 1] / scale[1]
    new_affine[2, 2] = new_affine[2, 2] / scale[2]

    for idx_img in range(len(path_images)):
        np_image_rescaled_ = stransform.rescale(np_images[idx_img],
                                                scale,
                                                order=1,
                                                preserve_range=True,
                                                mode='constant',
                                                cval=np_images[idx_img].min())
        if view:
            viewer = nib.viewers.OrthoSlicer3D(np_image_rescaled_)
            viewer.show()
        np_image_resized_ = mtransforms.ResizeWithPadOrCrop(spatial_size=target_size, mode='constant', constant_values=np.min(np_image_rescaled_))(np.expand_dims(np_image_rescaled_, 0))[0]
        if view:
            viewer = nib.viewers.OrthoSlicer3D(np_image_resized_)
            viewer.show()
        # Report the shape of the image actually processed in this iteration
        # (the original printed np_images[0].shape for every image).
        print(f'Writing scaled image with scale factor {scale} -> {target_resolution} \n'
              f'and size {np_images[idx_img].shape} -> {np_image_rescaled_.shape} -> {np_image_resized_.shape} \n'
              f'for {path_images[idx_img]} \n'
              f'with affine {new_affine}.')
        nii_image_converted = nib.Nifti1Image(np_image_resized_.astype(np.float32), new_affine, nii_images[0].header)
        nib.save(nii_image_converted, path_images[idx_img])

    if np_mask is not None:
        np_mask_rescaled = stransform.rescale(np_mask,
                                              scale,
                                              order=0,
                                              preserve_range=True,
                                              anti_aliasing=False,
                                              mode='constant',
                                              cval=0)
        np_mask_resized = mtransforms.ResizeWithPadOrCrop(spatial_size=target_size, mode='constant', constant_values=0)(np.expand_dims(np_mask_rescaled, 0))[0]
        nii_mask_converted = nib.Nifti1Image(np_mask_resized.astype(np.int32), new_affine, nii_images[0].header)
        nib.save(nii_mask_converted, path_mask)

    if np_scribbles is not None:
        np_scribbles_rescaled = stransform.rescale(np_scribbles,
                                                   scale,
                                                   order=0,
                                                   preserve_range=True,
                                                   anti_aliasing=False,
                                                   mode='constant',
                                                   cval=0)
        np_scribbles_resized = mtransforms.ResizeWithPadOrCrop(spatial_size=target_size, mode='constant', constant_values=0)(np.expand_dims(np_scribbles_rescaled, 0))[0]
        nii_scribbles_converted = nib.Nifti1Image(np_scribbles_resized.astype(np.int32), new_affine, nii_images[0].header)
        nib.save(nii_scribbles_converted, path_scribbles)

    # Guarded on the mask - previously this raised a NameError (np_mask_rescaled
    # undefined) when called with view=True but without a mask.
    if view and np_mask is not None:
        viewer = nib.viewers.OrthoSlicer3D(np.stack([np_image_rescaled_ / 255. * 10, np_mask_rescaled], axis=-1))
        viewer.show()
if __name__ == '__main__':
    # Note: new SimpleITK version (used in torchio) is extremely sensitive to affine errors.
    # Reorient all ACDC training images / masks / scribbles to a shared orientation
    # derived from each subject's image affine.
    images_in = '/mnt/SSD_SATA_03/data_med/acdc/raw/training'
    images_out = '/mnt/SSD_SATA_03/data_med/acdc/fixed_affine/training'
    scribbles_in = '/mnt/SSD_SATA_03/data_med/scribbles/acdc_scribbles_2020'
    scribbles_out = '/mnt/SSD_SATA_03/data_med/scribbles/acdc_scribbles_2020_fixed'
    # Images and masks live in the same directory tree, hence images_in is
    # passed for both inputs (and images_out for both outputs).
    reorient_acdc(dir_images=images_in,
                  dir_masks=images_in,
                  dir_scribbles=scribbles_in,
                  dir_images_out=images_out,
                  dir_masks_out=images_out,
                  dir_scribbles_out=scribbles_out)
| Python |
3D | marcdcfischer/PUNet | src/data/datasets/prepare_ctorg.py | .py | 3,657 | 92 | import pathlib as plb
import re
import csv
import nibabel as nib
import numpy as np
from sklearn import preprocessing
from src.data.datasets.pre_processing import _rescale
from typing import Tuple
from p_tqdm import p_map
def _process(path_img,
             path_lbl,
             dir_out,
             classes: Tuple[int, ...] = (0, 1, 2, 3, 4, 5),
             view: bool = False):
    """Re-encode the selected CT-ORG label classes to a contiguous range and
    save image and label to dir_out.

    Args:
        path_img: Path to the image nifti.
        path_lbl: Path to the label nifti (must match the image shape).
        dir_out: Output directory; input file names are reused.
        classes: Label values to keep; every other voxel becomes background (0).
        view: If True, open an interactive viewer after each processing stage.

    Returns:
        True if the subject was processed, False if it was discarded
        (missing liver annotation).
    """
    nii_image, nii_lbl = nib.load(path_img), nib.load(path_lbl)
    np_image = nii_image.get_fdata(dtype=np.float32)
    np_label = nii_lbl.get_fdata(dtype=np.float32).round().astype(np.int32)
    assert np_image.shape == np_label.shape

    if view:
        viewer = nib.viewers.OrthoSlicer3D(np.stack([np_image / 255. * 10., np_label], axis=-1))
        viewer.show()

    # Discard subjects without a liver annotation (label 1).
    if 1 not in np.unique(np_label):
        print('Case of missing liver label. Subject will be discarded.')
        return False

    # Label encoding
    print(f'Selecting labels {classes} out of {np.unique(np_label)}.')
    le = preprocessing.LabelEncoder()
    # Classes: 0: Background (None of the following organs), 1: Liver, 2: Bladder, 3: Lungs, 4: Kidneys, 5: Bone, 6: Brain
    # Groups: 1. Liver, Bladder, Kidneys, 2. Lungs, Bone
    le.classes_ = classes
    # Zero out every voxel whose label is not among the selected classes
    # (vectorized replacement of the previous per-class masking loop).
    np_label[~np.isin(np_label, classes)] = 0
    np_label = le.transform(np_label.reshape(-1)).reshape(np_label.shape)

    if view:
        viewer = nib.viewers.OrthoSlicer3D(np.stack([np_image / 255. * 10., np_label], axis=-1))
        viewer.show()

    nii_image_converted = nib.Nifti1Image(np_image, nii_image.affine, nii_image.header)
    nii_lbl_converted = nib.Nifti1Image(np_label, nii_lbl.affine, nii_lbl.header)
    nib.save(nii_image_converted, plb.Path(dir_out) / path_img.name)
    nib.save(nii_lbl_converted, plb.Path(dir_out) / path_lbl.name)
    return True
if __name__ == '__main__':
    # data from https://wiki.cancerimagingarchive.net/display/Public/CT-ORG%3A+CT+volumes+with+multiple+organ+segmentations#61080890bcab02c187174a288dbcbf95d26179e8
    dir_data = '/mnt/SSD_SATA_03/data_med/prompting/OrganSegmentations/'
    dir_out = '/mnt/SSD_SATA_03/data_med/prompting/processed_ctorg_fixed_spacing/'
    plb.Path(dir_out).mkdir(parents=True, exist_ok=True)
    target_resolution = (1.25, 1.25, 2.5)
    target_size = (280, 280, -1)
    classes = (0, 1, 2, 3, 4, 5)
    view = False
    overwrite = True

    paths_imgs = sorted(list(plb.Path(dir_data).glob('volume*.nii.gz')))
    paths_lbls = sorted(list(plb.Path(dir_data).glob('labels*.nii.gz')))
    assert len(paths_imgs) == len(paths_lbls)
    # Raw strings for the regexes - '\d' in a plain literal is an invalid escape
    # sequence (DeprecationWarning today, a SyntaxError in future Python versions).
    img_numbers = [re.search(r'\d+', str(x_.name)).group(0) for x_ in paths_imgs]
    lbl_numbers = [re.search(r'\d+', str(x_.name)).group(0) for x_ in paths_lbls]
    assert set(img_numbers) == set(lbl_numbers)

    def _process_single(path_img_, path_lbl_):
        # Process one (image, label) pair; _rescale only runs when the subject
        # was valid (i.e. not discarded for a missing liver label).
        print(f'--- Processing {path_img_.name} ---')
        if overwrite:
            valid = _process(path_img=path_img_,
                             path_lbl=path_lbl_,
                             dir_out=dir_out,
                             classes=classes,
                             view=view)
            if valid:
                _rescale(path_images=[str(plb.Path(dir_out) / path_img_.name)],
                         path_mask=str(plb.Path(dir_out) / path_lbl_.name),
                         target_resolution=target_resolution,
                         target_size=target_size,
                         view=view)

    # Process subjects in parallel, using half of the available cores.
    p_map(_process_single,
          paths_imgs,
          paths_lbls,
          num_cpus=0.5)
| Python |
3D | marcdcfischer/PUNet | src/data/datasets/gather_data.py | .py | 3,327 | 73 | from typing import Union, Dict, Tuple
from argparse import ArgumentParser, Namespace
import pathlib as plb
import pandas as pd
import numpy as np
from sklearn.model_selection import KFold, train_test_split
import math
def _generate_splits(df,
                     domains: Tuple[str, ...] = ('tcia', 'btcv'),
                     max_subjects_train: int = -1,
                     num_annotated: int = -1,
                     ratios: Tuple[float, float, float] = (0.7, 0.1, 0.2),
                     shuffle: bool = True,
                     random_state: int = 1,
                     verbose: bool = True):
    """Split subjects into train / val / test frames, stratified by domain.

    Args:
        df: Data frame with (at least) a 'names' column identifying subjects.
        domains: Domain identifiers; a subject belongs to the domain whose name
            is a substring of the subject name.
        max_subjects_train: Hard cap on the number of training subjects (<=0: off).
        num_annotated: Number of annotated training subjects to keep (<=0: all).
        ratios: (train, val, test) fractions; must sum to 1.
        shuffle: Whether to shuffle before splitting.
        random_state: Seed for reproducible splits.
        verbose: If True, print the resulting splits.

    Returns:
        Tuple (df_train, df_val, df_test).
    """
    # Exact float comparison fails for the default (0.7, 0.1, 0.2), whose binary
    # float sum is 0.9999999999999999 - use an epsilon-based check instead.
    assert math.isclose(sum(ratios), 1.), f'Split ratios {ratios} must sum to 1.'
    patients = df['names'].unique()
    patients_all = [[x_ for x_ in patients if domain_ in x_] for domain_ in domains]
    dfs_train, dfs_val, dfs_test = list(), list(), list()
    for idx_, patients_ in enumerate(patients_all):
        if len(patients_) > 0:
            patients_train_val, patients_test = train_test_split(patients_, test_size=ratios[2], shuffle=shuffle, random_state=random_state)
            patients_train, patients_val = train_test_split(patients_train_val, test_size=ratios[1] / (1 - ratios[2]), shuffle=shuffle, random_state=random_state)
            if max_subjects_train > 0:  # hard cut of amount of training data
                patients_train = patients_train[:max_subjects_train]

            # Limit training data to percentage of available data. patients_train are not reordered again (since they are already shuffled)
            n_patients_train_full = len(patients_train)
            if num_annotated > 0:
                assert num_annotated <= len(patients_train)  # Otherwise you're trying to grab too many subjects
                patients_train = patients_train[:num_annotated]
                print(f'Using {len(patients_train)} subjects of the available {n_patients_train_full} (~{(len(patients_train) / n_patients_train_full) * 100.} %) for training.')
            dfs_train.append(df[df['names'].isin(patients_train)])
            dfs_val.append(df[df['names'].isin(patients_val)])
            dfs_test.append(df[df['names'].isin(patients_test)])
    df_train = pd.concat(dfs_train)
    df_val = pd.concat(dfs_val)
    df_test = pd.concat(dfs_test)
    if verbose:
        print(f'Split dataset into train, val and test of length: {len(df_train)}, {len(df_val)}, {len(df_test)}.')
        print(f'Training on \n{df_train}.')
        print(f'Validating on \n{df_val}.')
        print(f'Testing on \n{df_test}')
    return df_train, df_val, df_test
def _mask_domains(df,
valid_choices: str,
modality: str = 't2spir',
verbose: bool = True):
if modality:
if modality not in valid_choices:
raise ValueError(f'The modality {modality} is not a valid choice of {valid_choices}')
# Mask selected modality
df.loc[df['domains'].str.contains(modality), 'annotated'] = False # TODO: Suppress warning / replace with more safe change?
if verbose:
print(f'Masked modality {modality}.')
print(f'Masked {df[df["annotated"] == False]["frames"].values} as non-annotated.')
print(f'Left {df[df["annotated"] == True]["frames"].values} unchanged.')
else:
print(f'No masking is performed.')
return df
| Python |
3D | marcdcfischer/PUNet | shell/train.sh | .sh | 5,455 | 119 | #!/bin/bash
# Check amount of arguments
if [[ $# -lt 14 ]] ; then
echo 'A wrong amount of arguments (< 14) has been provided.'
exit 1
fi
# load env variables
username="${14}"
CLUSTER="my_cluster" # EDIT ME
PYTHON="/my/python/versions/3.9.8/bin/python" # EDIT ME
CODE="/my/code/" # EDIT ME
export PYTHONPATH="${CODE}" # EDIT ME
# Parse arguments as GPU list
export CUDA_VISIBLE_DEVICES="$1"
echo "Using CUDA_VISIBLE_DEVICES=${CUDA_VISIBLE_DEVICES}"
gpus="--gpus 1"
# set main params
script="main.py"
dir_images="--dir_images /my/data/" # EDIT ME
dir_masks="--dir_masks /my/data/" # EDIT ME
# default parameters
max_epochs=400
max_epochs_downstream=100
batch_size=16
learning_rate=1e-4
learning_rate_downstream=5e-4
learning_rate_instructions=1e-3
learning_rate_instructions_downstream=5e-4
loss_weight_segmentation=1e-0
loss_weight_sim_protos=1e-2
tokens_per_instruction=16
num_workers=16
# configurations
cfg_training="$2"
cfg_frozen="$3"
cfg_bias="$4"
cfg_labels="$5"
cfg_downstream="$6"
cfg_dimensions="$7"
cfg_architecture="$8"
cfg_prompting="$9"
cfg_adaptation="${10}"
cfg_dataset="${11}"
cfg_amount="${12}"
source ../common/cfgs.sh
# fetch checkpoint (if available)
ckpt="" # "--ckpt "
if [ "${13}" == "none" ]; then
echo "Using no ckpt."
else
ckpt="--ckpt_run_name ${13}"
echo "Using ckpt ${13}."
fi
# ablations
ablations="${*:15}" # Anything passed (directly) as flag during shell script call.
ablations="${ablations}"
if [ "$loss_meta" == false ]; then loss_weight_segmentation=0.; fi;
if [ "$loss_self" == false ]; then loss_weight_sim_protos=0.; fi;
if [ "$downstream" == true ]; then ablations="${ablations} --downstream --no_overwrite --cold_start"; fi;
if [ "${architecture}" == "wip" ] || [ "${architecture}" == "wip_simple" ]; then
if [ "$noninstructed" == true ]; then ablations="${ablations} --noninstructed_attention"; fi;
if [ "$noninstructed_downstream" == true ]; then ablations="${ablations} --noninstructed_attention_downstream"; fi;
if [ "$frozen" == true ]; then ablations="${ablations} --selective_freezing"; fi;
if [ "$bias_instructions" == false ]; then ablations="${ablations} --no_bias_instructions"; fi;
if [ "$bias_content" == false ]; then ablations="${ablations} --no_bias_content"; fi;
if [ "$bias_vit" == true ]; then ablations="${ablations} --bias_vit"; fi;
if [ "$fixed_output" == true ]; then ablations="${ablations} --fixed_output"; fi;
fi;
# parameters (potentially adjusted by above ablation statements)
parameters="--batch_size ${batch_size} --learning_rate ${learning_rate} --learning_rate_downstream ${learning_rate_downstream} --learning_rate_instructions ${learning_rate_instructions} --learning_rate_instructions_downstream ${learning_rate_instructions_downstream}"
parameters="${parameters} --architecture ${architecture}"
parameters="${parameters} --loss_weight_segmentation ${loss_weight_segmentation} --loss_weight_sim_protos ${loss_weight_sim_protos}"
parameters="${parameters} --dataset ${dataset} --num_workers ${num_workers}"
parameters="${parameters} --num_annotated ${num_annotated}"
if [ "$downstream" == true ]; then parameters="${parameters} --max_epochs ${max_epochs_downstream}"; else parameters="${parameters} --max_epochs ${max_epochs}"; fi;
if [ "${architecture}" == "wip" ] || [ "${architecture}" == "wip_simple" ]; then
parameters="${parameters} --label_indices_base ${label_indices_base} --tokens_per_instruction ${tokens_per_instruction}"
parameters="${parameters} --patch_size_students ${patch_size_students} --patch_size_teacher ${patch_size_teacher} --attn_window_size ${attn_window_size}"
parameters="${parameters} --prompting_variant ${prompting_variant} --adaptation_variant ${adaptation_variant}"
fi;
# tags
tags="--tags"
# tags from configuration
tags="${tags} dim_${cfg_dimensions} nn_${architecture} data_${dataset} user_${username} cl_${CLUSTER}"
tags="${tags} lr_${learning_rate} lrds_${learning_rate_downstream} lri_${learning_rate_instructions} lrids_${learning_rate_instructions_downstream} bs_${batch_size} na_${num_annotated}"
if [ "$loss_meta" == true ]; then tags="${tags} loss_meta"; fi;
if [ "$loss_self" == true ]; then tags="${tags} loss_self"; fi;
if [ "${13}" != "none" ]; then tags="${tags} ckpt_${13:17}"; fi;
if [ "$downstream" == true ]; then tags="${tags} downstream"; fi;
if [ "${architecture}" == "wip" ] || [ "${architecture}" == "wip_simple" ]; then
tags="${tags} tk_${tokens_per_instruction} pv_${prompting_variant} av_${adaptation_variant}"
if [ "$noninstructed" == true ]; then tags="${tags} noninstructed"; fi;
if [ "$noninstructed_downstream" == true ]; then tags="${tags} noninstructed_ds"; fi;
if [ "$fixed_output" == true ]; then tags="${tags} fixed_output"; fi;
if [ "$frozen" == true ]; then tags="${tags} frozen"; fi;
if [ "$bias_instructions" == true ]; then tags="${tags} bias_instructions"; fi;
if [ "$bias_content" == true ]; then tags="${tags} bias_content"; fi;
if [ "$bias_vit" == true ]; then tags="${tags} bias_vit"; fi;
fi;
# Run python code
timestamp=$(date +%Y%m%d_%H%M%S)
std_out="std_${timestamp}.out"
std_err="std_${timestamp}.err"
echo "Using python: ${PYTHON}"
echo "Executing cmd: $PYTHON ${CODE}/src/${script} ${dir_images} ${dir_masks} ${gpus} ${parameters} ${ablations} ${tags} ${ckpt} > ${std_out} 2> ${std_err}"
$PYTHON ${CODE}/src/${script} ${dir_images} ${dir_masks} ${gpus} ${parameters} ${ablations} ${tags} ${ckpt} > ${std_out} 2> ${std_err}
| Shell |
3D | marcdcfischer/PUNet | shell/cfgs.sh | .sh | 7,069 | 236 | #!/bin/bash
# Configuration based on selected variants
echo "Using configurations -
training: ${cfg_training},
frozen: ${cfg_frozen},
bias: ${cfg_bias},
labels: ${cfg_labels},
downstream: ${cfg_downstream},
dimensions: ${cfg_dimensions},
architecture: ${cfg_architecture},
prompting: ${cfg_prompting},
adaptation: ${cfg_adaptation},
dataset ${cfg_dataset},
amount: ${cfg_amount}."
# Ablations
# losses / training scheme
# Note: noninstructed means that instructions are not passed to the attention layers. They can still be used for the final similarity comparison
if [ "${cfg_training}" == "meta_self" ]; then
loss_meta=true
loss_self=true
noninstructed=false
elif [ "${cfg_training}" == "meta_self_noninstructed" ]; then
loss_meta=true
loss_self=true
noninstructed=true
elif [ "${cfg_training}" == "meta" ]; then
loss_meta=true
loss_self=false
noninstructed=false
elif [ "${cfg_training}" == "meta_noninstructed" ]; then
# Only makes sense in combination with fixed layer
loss_meta=true
loss_self=false
noninstructed=true
elif [ "${cfg_training}" == "self" ]; then
loss_meta=false
loss_self=true
noninstructed=false
elif [ "${cfg_training}" == "self_noninstructed" ]; then
loss_meta=false
loss_self=true
noninstructed=true
else
echo "Invalid training configuration ${cfg_training}."
exit 1
fi
# (Downstream) frozen scheme
if [ "${cfg_frozen}" == "frozen" ]; then
frozen=true
elif [ "${cfg_frozen}" == "nonfrozen" ]; then
frozen=false
else
echo "Invalid prompting configuration ${cfg_frozen}."
exit 1
fi
# Bias score schemes
if [ "${cfg_bias}" == "all" ]; then
bias_instructions=true
bias_content=true
bias_vit=false
elif [ "${cfg_bias}" == "image_only" ]; then
bias_instructions=false
bias_content=true
bias_vit=false
elif [ "${cfg_bias}" == "pure" ]; then
bias_instructions=false
bias_content=false
bias_vit=false
elif [ "${cfg_bias}" == "vit" ]; then
bias_instructions=false
bias_content=false
bias_vit=true
else
echo "Invalid bias configuration ${cfg_bias}."
exit 1
fi
# Downstream phase
if [ "${cfg_downstream}" == false ]; then
downstream=false
elif [ "${cfg_downstream}" == true ]; then
downstream=true
else
echo "Invalid downstream configuration ${cfg_downstream}."
exit 1
fi
# 2D / 3D slice configurations
# Note: string with "own" format will be parsed for students: [(1, 2, 3), (4, 5, 6)] = "1,2,3;4,5,6"
if [ "${cfg_dimensions}" == "2d" ]; then
patch_size_students="224,224,1;160,160,1"
patch_size_teacher="256 256 1"
attn_window_size="8 8 1"
elif [ "${cfg_dimensions}" == "3d_flat" ]; then
patch_size_students="112,112,4;80,80,4"
patch_size_teacher="128 128 4"
attn_window_size="6 6 2"
elif [ "${cfg_dimensions}" == "3d_aniso" ]; then
patch_size_students="56,56,8;40,40,8"
patch_size_teacher="64 64 8"
attn_window_size="4 4 4"
elif [ "${cfg_dimensions}" == "3d_large" ]; then
patch_size_students="112,112,8;80,80,8"
patch_size_teacher="128 128 8"
attn_window_size="6 6 4"
else
echo "Invalid dimensions configuration ${cfg_dimensions}."
exit 1
fi
# Selectable architectures
if [ "${cfg_architecture}" == "wip" ]; then
architecture="wip"
elif [ "${cfg_architecture}" == "wip_simple" ]; then
architecture="wip_simple"
elif [ "${cfg_architecture}" == "unet" ]; then
architecture="unet"
elif [ "${cfg_architecture}" == "unetr" ]; then
architecture="unetr"
elif [ "${cfg_architecture}" == "swin_unetr" ]; then
architecture="swin_unetr"
else
echo "Invalid architecture configuration ${cfg_architecture}."
exit 1
fi
# Prompting variants
if [ "${cfg_prompting}" == "full" ]; then
prompting_variant="full"
elif [ "${cfg_prompting}" == "start" ]; then
prompting_variant="start"
elif [ "${cfg_prompting}" == "end" ]; then
prompting_variant="end"
elif [ "${cfg_prompting}" == "encoder" ]; then
prompting_variant="encoder"
elif [ "${cfg_prompting}" == "decoder" ]; then
prompting_variant="decoder"
else
echo "Invalid prompting variant configuration ${cfg_prompting}."
exit 1
fi
# Body adaptation ablations
if [ "${cfg_adaptation}" == "prompting" ]; then
adaptation_variant="prompting"
fixed_output=false
noninstructed_downstream=false
elif [ "${cfg_adaptation}" == "fixed" ]; then
adaptation_variant="fixed"
fixed_output=true
noninstructed_downstream=true
elif [ "${cfg_adaptation}" == "decoder" ]; then
adaptation_variant="decoder"
fixed_output=true
noninstructed_downstream=true
elif [ "${cfg_adaptation}" == "bias" ]; then
adaptation_variant="bias"
fixed_output=true
noninstructed_downstream=true
elif [ "${cfg_adaptation}" == "adapter" ]; then
adaptation_variant="adapter"
fixed_output=true
noninstructed_downstream=true
elif [ "${cfg_adaptation}" == "bias_prompting" ]; then
adaptation_variant="bias_prompting"
fixed_output=false
noninstructed_downstream=false
else
echo "Invalid adaptation configuration ${cfg_adaptation}."
exit 1
fi
# Available datasets
if [ "${cfg_dataset}" == "tcia_btcv" ]; then
dataset="tcia_btcv"
# Possible label combinations
if [ "${cfg_labels}" == "0" ]; then
# All labels
label_indices_base="1 2 3 4 5 6 7 8"
# label_indices_downstream_active="" # No downstream required
elif [ "${cfg_labels}" == "1" ]; then
# Abdominal organs
label_indices_base="1 2 3 5"
# label_indices_downstream_active="" # Any instruction not seen during training is eligible. Set it directly via flag.
elif [ "${cfg_labels}" == "2" ]; then
# Digestive system
label_indices_base="4 6 7 8"
elif [ "${cfg_labels}" == "-1" ]; then
# Self-sup only (using fixed instruction - without any seg. loss)
label_indices_base="1"
# label_indices_downstream_active="" # Any instruction not seen during training is eligible. Set it directly via flag.
else
echo "Invalid labels configuration ${cfg_labels} for dataset configuration ${cfg_dataset}."
exit 1
fi
elif [ "${cfg_dataset}" == "ctorg" ]; then
dataset="ctorg"
# Possible label combinations
if [ "${cfg_labels}" == "0" ]; then
label_indices_base="1 2 3 4 5"
# label_indices_downstream_active="" # No downstream required
elif [ "${cfg_labels}" == "1" ]; then
label_indices_base="1 2 4"
# label_indices_downstream_active="" # Any instruction not seen during training is eligible. Set it directly via flag.
elif [ "${cfg_labels}" == "2" ]; then
label_indices_base="3 5"
# label_indices_downstream_active="" # Any instruction not seen during training is eligible. Set it directly via flag.
elif [ "${cfg_labels}" == "-1" ]; then
label_indices_base="1"
else
echo "Invalid labels configuration ${cfg_labels} for dataset configuration ${cfg_dataset}."
exit 1
fi
else
echo "Invalid dataset configuration ${cfg_dataset}."
exit 1
fi
# Amount of annotated training data
re='^[0-9]+$'
if [ "${cfg_amount}" == "-1" ]; then
num_annotated=-1
elif [[ "${cfg_amount}" =~ $re ]] ; then
num_annotated="${cfg_amount}"
else
echo "Invalid amount configuration ${cfg_amount}."
exit 1
fi
| Shell |
3D | marcdcfischer/PUNet | shell/train_p2.sh | .sh | 1,442 | 35 | #!/bin/bash
# call via nohup ./train_p2.sh 0 &
cd "$(dirname ${0})" || exit
gpu=$1
# See common/cfgs.sh for all options
cfg_training="meta"
cfg_frozen="frozen"
cfg_bias="all"
cfg_labels="0"
cfg_downstream=true
cfg_dimensions="2d"
cfg_architecture="wip"
cfg_prompting="full"
cfg_adaptation="prompting"
cfg_dataset="tcia_btcv" # tcia_btcv or ctorg
cfg_amount="-1" # use half the amount of annotated for tcia_btcv since those are two datasets
ckpts="none" # "none" or (w&b) run name
username="github-user"
labels_downstream=("1" "2" "3" "5")
misc="${*:2}" # fetch remaining parameters
# loop through runs
timestamp=$(date +%Y%m%d_%H%M%S)
multiple_out="multiple_${timestamp}.out"
multiple_err="multiple_${timestamp}.err"
for label_ in "${labels_downstream[@]}"; do
sleep 2
misc_="${misc} --label_indices_downstream_active ${label_}"
echo "bash ./train.sh ${gpu} ${cfg_training} ${cfg_frozen} ${cfg_bias} ${cfg_labels} ${cfg_downstream} ${cfg_dimensions} ${cfg_architecture} ${cfg_prompting} ${cfg_adaptation} ${cfg_dataset} ${cfg_amount} ${ckpts} ${username} ${misc_} >> ${multiple_out} 2>> ${multiple_err} &" >> ${multiple_out} 2>> ${multiple_err}
bash ./train.sh ${gpu} ${cfg_training} ${cfg_frozen} ${cfg_bias} ${cfg_labels} ${cfg_downstream} ${cfg_dimensions} ${cfg_architecture} ${cfg_prompting} ${cfg_adaptation} ${cfg_dataset} ${cfg_amount} ${ckpts} ${username} ${misc_} >> ${multiple_out} 2>> ${multiple_err} &
wait
done
| Shell |
3D | marcdcfischer/PUNet | shell/train_p1.sh | .sh | 1,498 | 33 | #!/bin/bash
# call via nohup ./train_p1.sh 0 &
cd "$(dirname ${0})" || exit
gpu=$1
# See common/cfgs.sh for all options
cfg_training=("meta_self")
cfg_frozen=("nonfrozen")
cfg_bias=("all")
cfg_labels=("0")
cfg_downstream=(false)
cfg_dimensions=("2d")
cfg_architecture=("wip")
cfg_prompting=("full")
cfg_adaptation=("prompting")
cfg_dataset=("tcia_btcv") # tcia_btcv or ctorg
cfg_amount=("-1") # use half the amount of annotated for tcia_btcv since those are two datasets
ckpts=("none") # "none" or (w&b) run name
username="github-user"
misc="${*:2}" # fetch remaining parameters
# loop through runs
timestamp=$(date +%Y%m%d_%H%M%S)
multiple_out="multiple_${timestamp}.out"
multiple_err="multiple_${timestamp}.err"
for idx_ in {0..0}; do
sleep 2
echo "bash ./train.sh ${gpu} ${cfg_training[idx_]} ${cfg_frozen[idx_]} ${cfg_bias[idx_]} ${cfg_labels[idx_]} ${cfg_downstream[idx_]} ${cfg_dimensions[idx_]} ${cfg_architecture[idx_]} ${cfg_prompting[idx_]} ${cfg_adaptation[idx_]} ${cfg_dataset[idx_]} ${cfg_amount[idx_]} ${ckpts[idx_]} ${username} ${misc} >> ${multiple_out} 2>> ${multiple_err} &" >> ${multiple_out} 2>> ${multiple_err}
bash ./train.sh ${gpu} ${cfg_training[idx_]} ${cfg_frozen[idx_]} ${cfg_bias[idx_]} ${cfg_labels[idx_]} ${cfg_downstream[idx_]} ${cfg_dimensions[idx_]} ${cfg_architecture[idx_]} ${cfg_prompting[idx_]} ${cfg_adaptation[idx_]} ${cfg_dataset[idx_]} ${cfg_amount[idx_]} ${ckpts[idx_]} ${username} ${misc} >> ${multiple_out} 2>> ${multiple_err} &
wait
done
| Shell |
3D | stillbreeze/3D-reconstruction-Using-SfM-and-Stereo-Matching | code/SFMedu/PoseEMat.m | .m | 1,222 | 58 | % PoseEMat - estimate the pose from essential matrix with SVD.
%
% Usage:
% [R1, R2, t1, t2] = PoseEMat(E)
%
% Input:
% E : essential matrix
%
% Output:
% R1 : 3x3 rotation matrix 1
% R2 : 3x3 rotation matrix 2
% t1 : 3x1 translation vector 1
% t2 : 3x1 translation vector 2
%
% This code follows the algorithm given by
% [1] Hartley and Zisserman "Multiple View Geometry in Computer Vision,"
% pp.257-259, 2003.
%
% Kim, Daesik
% Intelligent Systems Research Center
% Sungkyunkwan Univ. (SKKU), South Korea
% E-mail : daesik80@skku.edu
% Homepage: http://www.daesik80.com
%
% June 2008 - Original version.
% Check determinant, 2014 by Fisher Yu
% Decompose an essential matrix E into the two candidate rotations and the
% two candidate translations (four pose hypotheses in total); the caller is
% expected to disambiguate via a cheirality (points-in-front) check.
function [R1, R2, t1, t2] = PoseEMat(E)
%% essential matrix decomposition
[U,D,V] = svd(E);
%disp('D')
%disp(D)
% W: rotation by 90 degrees about z; Z: skew-symmetric helper (HZ pp.257-259).
W = [0 -1 0; 1 0 0; 0 0 1];
Z = [0 1 0;-1 0 0; 0 0 0];
%% translation vector (skew-symmetric matrix)
% NOTE(review): S (= [t]_x up to scale) is computed but never used or returned here.
S = U*Z*U';
%% two possible rotation matrices
R1 = U*W*V';
R2 = U*W'*V';
%% two possible translation vectors
% Translation (up to scale) is the last left singular vector, with both signs.
t1 = U(:,3);
t2 = -U(:,3);
%% check determinant
% Negating a 3x3 matrix flips its determinant sign, so this forces det = +1
% (a proper rotation) when the SVD yields a reflection.
if det(R1) < 0
R1 = -R1;
end
if det(R2) < 0
R2 = -R2;
end | MATLAB |
3D | stillbreeze/3D-reconstruction-Using-SfM-and-Stereo-Matching | code/SFMedu/peig5pt.m | .m | 144,129 | 287 | % fast implementation of the 5pt relative pose problem
%
% by M. Bujnak, Z. Kukelova (c)sep2008
%
%
% Please refer to the following paper, when using this code :
%
% Kukelova, Z., Bujnak, M. and Pajdla, T., Polynomial eigenvalue solutions
% to the 5-pt and 6-pt relative pose problems, BMVC 2008, Leeds, Sept. 2008
%
% [E] = peig5pt(Q1, Q2)
%
% input:
%
% Q1 - 3x5 matrix with 5 2D measurements (homogeneous columns)
% Q2 - 3x5 matrix with 5 corresponding 2D points
%
% output:
%
% E - 3Nx3 matrix with N essential matrices, such that Q2(:,i)'*E*Q1(:,i) = 0
function [E] = peig5pt(Q1, Q2)
Q1 = Q1';
Q2 = Q2';
Q = [Q2(:,1).*Q1(:,1),...
Q2(:,1).*Q1(:,2),...
Q2(:,1).*Q1(:,3),...
Q2(:,2).*Q1(:,1),...
Q2(:,2).*Q1(:,2),...
Q2(:,2).*Q1(:,3),...
Q2(:,3).*Q1(:,1),...
Q2(:,3).*Q1(:,2),...
Q2(:,3).*Q1(:,3),...
];
[U S V] = svd(Q);
E1 = reshape(V(:,6), 3, 3)';
E2 = reshape(V(:,7), 3, 3)';
E3 = reshape(V(:,8), 3, 3)';
E4 = reshape(V(:,9), 3, 3)';
% equations in coefficient matrix
Mcoefs = zeros(10, 20);
Mcoefs(1) = -E1(1,1)*E1(3,2)*E1(2,3)-E1(2,1)*E1(1,2)*E1(3,3)+E1(1,1)*E1(2,2)*E1(3,3)-E1(3,1)*E1(2,2)*E1(1,3)+E1(2,1)*E1(3,2)*E1(1,3)+E1(3,1)*E1(1,2)*E1(2,3);
Mcoefs(2) = -E1(1,1)*E1(2,3)^2-E1(1,1)*E1(3,3)^2-E1(1,1)*E1(2,2)^2-E1(1,1)*E1(3,2)^2+E1(1,1)*E1(2,1)^2+2*E1(3,1)*E1(1,2)*E1(3,2)+2*E1(3,1)*E1(1,3)*E1(3,3)+2*E1(2,1)*E1(1,2)*E1(2,2)+E1(1,1)*E1(3,1)^2+E1(1,1)^3+E1(1,1)*E1(1,2)^2+E1(1,1)*E1(1,3)^2+2*E1(2,1)*E1(1,3)*E1(2,3);
Mcoefs(3) = 2*E1(1,1)*E1(1,2)*E1(2,2)+2*E1(1,1)*E1(1,3)*E1(2,3)+2*E1(3,1)*E1(3,2)*E1(2,2)+2*E1(3,1)*E1(2,3)*E1(3,3)-E1(2,1)*E1(3,3)^2-E1(2,1)*E1(1,3)^2-E1(2,1)*E1(3,2)^2-E1(2,1)*E1(1,2)^2+E1(2,1)^3+E1(1,1)^2*E1(2,1)+E1(2,1)*E1(3,1)^2+E1(2,1)*E1(2,3)^2+E1(2,1)*E1(2,2)^2;
Mcoefs(4) = 2*E1(2,1)*E1(2,3)*E1(3,3)+E1(3,1)*E1(3,2)^2-E1(3,1)*E1(1,2)^2-E1(3,1)*E1(2,2)^2-E1(3,1)*E1(1,3)^2-E1(3,1)*E1(2,3)^2+2*E1(1,1)*E1(1,2)*E1(3,2)+2*E1(1,1)*E1(1,3)*E1(3,3)+2*E1(2,1)*E1(3,2)*E1(2,2)+E1(3,1)*E1(3,3)^2+E1(1,1)^2*E1(3,1)+E1(2,1)^2*E1(3,1)+E1(3,1)^3;
Mcoefs(5) = E1(1,2)^3+2*E1(1,1)*E1(2,1)*E1(2,2)+2*E1(1,1)*E1(3,1)*E1(3,2)+E1(1,1)^2*E1(1,2)-E1(2,1)^2*E1(1,2)-E1(3,1)^2*E1(1,2)-E1(1,2)*E1(2,3)^2-E1(1,2)*E1(3,3)^2+E1(1,2)*E1(2,2)^2+E1(1,2)*E1(1,3)^2+E1(1,2)*E1(3,2)^2+2*E1(3,2)*E1(1,3)*E1(3,3)+2*E1(2,2)*E1(1,3)*E1(2,3);
Mcoefs(6) = 2*E1(2,1)*E1(3,1)*E1(3,2)+2*E1(1,2)*E1(1,3)*E1(2,3)+2*E1(3,2)*E1(2,3)*E1(3,3)+E1(2,1)^2*E1(2,2)-E1(1,1)^2*E1(2,2)-E1(3,1)^2*E1(2,2)-E1(2,2)*E1(1,3)^2-E1(2,2)*E1(3,3)^2+E1(2,2)^3+2*E1(1,1)*E1(2,1)*E1(1,2)+E1(2,2)*E1(2,3)^2+E1(1,2)^2*E1(2,2)+E1(3,2)^2*E1(2,2);
Mcoefs(7) = -E1(2,1)^2*E1(3,2)-E1(3,2)*E1(1,3)^2-E1(3,2)*E1(2,3)^2+2*E1(1,1)*E1(3,1)*E1(1,2)-E1(1,1)^2*E1(3,2)+E1(3,2)^3+2*E1(2,1)*E1(3,1)*E1(2,2)+2*E1(1,2)*E1(1,3)*E1(3,3)+2*E1(2,2)*E1(2,3)*E1(3,3)+E1(3,2)*E1(2,2)^2+E1(1,2)^2*E1(3,2)+E1(3,1)^2*E1(3,2)+E1(3,2)*E1(3,3)^2;
Mcoefs(8) = 2*E1(1,1)*E1(2,1)*E1(2,3)+2*E1(1,1)*E1(3,1)*E1(3,3)+2*E1(1,2)*E1(3,2)*E1(3,3)+2*E1(1,2)*E1(2,2)*E1(2,3)+E1(1,2)^2*E1(1,3)+E1(1,3)^3+E1(1,3)*E1(2,3)^2+E1(1,1)^2*E1(1,3)-E1(2,1)^2*E1(1,3)-E1(3,1)^2*E1(1,3)-E1(3,2)^2*E1(1,3)-E1(2,2)^2*E1(1,3)+E1(1,3)*E1(3,3)^2;
Mcoefs(9) = 2*E1(1,1)*E1(2,1)*E1(1,3)+2*E1(2,1)*E1(3,1)*E1(3,3)+2*E1(1,2)*E1(2,2)*E1(1,3)+2*E1(3,2)*E1(2,2)*E1(3,3)+E1(2,3)^3-E1(1,1)^2*E1(2,3)-E1(3,1)^2*E1(2,3)-E1(1,2)^2*E1(2,3)-E1(3,2)^2*E1(2,3)+E1(2,1)^2*E1(2,3)+E1(2,2)^2*E1(2,3)+E1(1,3)^2*E1(2,3)+E1(2,3)*E1(3,3)^2;
Mcoefs(10) = 2*E1(1,1)*E1(3,1)*E1(1,3)+2*E1(2,1)*E1(3,1)*E1(2,3)+2*E1(1,2)*E1(3,2)*E1(1,3)+2*E1(3,2)*E1(2,2)*E1(2,3)+E1(3,3)^3+E1(3,2)^2*E1(3,3)-E1(2,2)^2*E1(3,3)+E1(2,3)^2*E1(3,3)+E1(3,1)^2*E1(3,3)+E1(1,3)^2*E1(3,3)-E1(1,1)^2*E1(3,3)-E1(2,1)^2*E1(3,3)-E1(1,2)^2*E1(3,3);
Mcoefs(11) = E1(1,2)*E1(2,3)*E2(3,1)+E1(1,1)*E1(3,3)*E2(2,2)+E1(2,2)*E1(3,3)*E2(1,1)+E1(1,1)*E1(2,2)*E2(3,3)-E1(1,1)*E1(2,3)*E2(3,2)-E1(1,1)*E1(3,2)*E2(2,3)+E1(3,1)*E1(2,3)*E2(1,2)-E1(2,2)*E1(1,3)*E2(3,1)-E1(1,2)*E1(3,3)*E2(2,1)+E1(3,2)*E1(1,3)*E2(2,1)+E1(2,1)*E1(3,2)*E2(1,3)-E1(3,2)*E1(2,3)*E2(1,1)+E1(3,1)*E1(1,2)*E2(2,3)-E1(3,1)*E1(1,3)*E2(2,2)-E1(3,1)*E1(2,2)*E2(1,3)-E1(2,1)*E1(1,2)*E2(3,3)-E1(2,1)*E1(3,3)*E2(1,2)+E1(2,1)*E1(1,3)*E2(3,2);
Mcoefs(12) = 2*E1(3,1)*E1(3,2)*E2(1,2)+2*E1(1,3)*E1(3,3)*E2(3,1)+2*E1(3,1)*E1(3,3)*E2(1,3)+2*E1(1,2)*E1(3,2)*E2(3,1)-2*E1(1,1)*E1(3,2)*E2(3,2)-2*E1(1,1)*E1(3,3)*E2(3,3)+2*E1(2,1)*E1(1,2)*E2(2,2)+2*E1(2,1)*E1(2,2)*E2(1,2)+2*E1(1,1)*E1(2,1)*E2(2,1)+2*E1(1,1)*E1(1,3)*E2(1,3)-2*E1(1,1)*E1(2,3)*E2(2,3)+2*E1(3,1)*E1(1,3)*E2(3,3)+3*E1(1,1)^2*E2(1,1)+E1(3,1)^2*E2(1,1)+E1(1,2)^2*E2(1,1)+E1(2,1)^2*E2(1,1)+E1(1,3)^2*E2(1,1)-E1(2,3)^2*E2(1,1)-E1(3,2)^2*E2(1,1)-E1(3,3)^2*E2(1,1)-E1(2,2)^2*E2(1,1)+2*E1(1,3)*E1(2,3)*E2(2,1)+2*E1(1,1)*E1(1,2)*E2(1,2)-2*E1(1,1)*E1(2,2)*E2(2,2)+2*E1(1,1)*E1(3,1)*E2(3,1)+2*E1(1,2)*E1(2,2)*E2(2,1)+2*E1(2,1)*E1(1,3)*E2(2,3)+2*E1(2,1)*E1(2,3)*E2(1,3)+2*E1(3,1)*E1(1,2)*E2(3,2);
Mcoefs(13) = 2*E1(3,1)*E1(2,3)*E2(3,3)+2*E1(3,1)*E1(3,3)*E2(2,3)+2*E1(1,1)*E1(2,1)*E2(1,1)+2*E1(1,1)*E1(2,2)*E2(1,2)-2*E1(2,1)*E1(3,2)*E2(3,2)+2*E1(3,1)*E1(2,2)*E2(3,2)+2*E1(3,1)*E1(3,2)*E2(2,2)+2*E1(1,1)*E1(2,3)*E2(1,3)+2*E1(2,1)*E1(2,2)*E2(2,2)+2*E1(2,3)*E1(3,3)*E2(3,1)+2*E1(2,1)*E1(2,3)*E2(2,3)+2*E1(1,3)*E1(2,3)*E2(1,1)+2*E1(1,1)*E1(1,2)*E2(2,2)+2*E1(3,2)*E1(2,2)*E2(3,1)-2*E1(2,1)*E1(1,2)*E2(1,2)+2*E1(1,2)*E1(2,2)*E2(1,1)+2*E1(1,1)*E1(1,3)*E2(2,3)-2*E1(2,1)*E1(1,3)*E2(1,3)-2*E1(2,1)*E1(3,3)*E2(3,3)+2*E1(2,1)*E1(3,1)*E2(3,1)-E1(3,2)^2*E2(2,1)-E1(1,3)^2*E2(2,1)-E1(1,2)^2*E2(2,1)+E1(1,1)^2*E2(2,1)+3*E1(2,1)^2*E2(2,1)+E1(2,3)^2*E2(2,1)+E1(2,2)^2*E2(2,1)-E1(3,3)^2*E2(2,1)+E1(3,1)^2*E2(2,1);
Mcoefs(14) = 2*E1(3,1)*E1(3,2)*E2(3,2)-2*E1(3,1)*E1(1,2)*E2(1,2)+2*E1(1,1)*E1(3,1)*E2(1,1)+2*E1(2,1)*E1(3,1)*E2(2,1)-2*E1(3,1)*E1(1,3)*E2(1,3)+2*E1(1,1)*E1(3,2)*E2(1,2)+2*E1(1,2)*E1(3,2)*E2(1,1)-2*E1(3,1)*E1(2,2)*E2(2,2)+2*E1(2,1)*E1(2,2)*E2(3,2)+2*E1(2,1)*E1(3,3)*E2(2,3)+2*E1(1,1)*E1(1,3)*E2(3,3)+2*E1(1,1)*E1(1,2)*E2(3,2)-2*E1(3,1)*E1(2,3)*E2(2,3)+2*E1(1,3)*E1(3,3)*E2(1,1)+2*E1(2,3)*E1(3,3)*E2(2,1)+2*E1(2,1)*E1(3,2)*E2(2,2)+2*E1(1,1)*E1(3,3)*E2(1,3)+E1(3,3)^2*E2(3,1)+2*E1(3,2)*E1(2,2)*E2(2,1)+2*E1(2,1)*E1(2,3)*E2(3,3)+2*E1(3,1)*E1(3,3)*E2(3,3)+E1(3,2)^2*E2(3,1)-E1(1,2)^2*E2(3,1)-E1(2,3)^2*E2(3,1)-E1(1,3)^2*E2(3,1)-E1(2,2)^2*E2(3,1)+E1(2,1)^2*E2(3,1)+E1(1,1)^2*E2(3,1)+3*E1(3,1)^2*E2(3,1);
Mcoefs(15) = -E1(3,3)^2*E2(1,2)-E1(2,1)^2*E2(1,2)-E1(3,1)^2*E2(1,2)+3*E1(1,2)^2*E2(1,2)+E1(1,1)^2*E2(1,2)+E1(2,2)^2*E2(1,2)+E1(3,2)^2*E2(1,2)+E1(1,3)^2*E2(1,2)+2*E1(1,2)*E1(1,3)*E2(1,3)+2*E1(1,3)*E1(2,3)*E2(2,2)-2*E1(2,1)*E1(1,2)*E2(2,1)+2*E1(3,2)*E1(1,3)*E2(3,3)-2*E1(1,2)*E1(3,3)*E2(3,3)-2*E1(1,2)*E1(2,3)*E2(2,3)+2*E1(1,1)*E1(1,2)*E2(1,1)+2*E1(1,1)*E1(2,1)*E2(2,2)+2*E1(2,2)*E1(1,3)*E2(2,3)+2*E1(2,2)*E1(2,3)*E2(1,3)+2*E1(1,2)*E1(2,2)*E2(2,2)+2*E1(1,2)*E1(3,2)*E2(3,2)+2*E1(1,3)*E1(3,3)*E2(3,2)-2*E1(3,1)*E1(1,2)*E2(3,1)+2*E1(3,1)*E1(3,2)*E2(1,1)+2*E1(1,1)*E1(3,1)*E2(3,2)+2*E1(1,1)*E1(2,2)*E2(2,1)+2*E1(3,2)*E1(3,3)*E2(1,3)-E1(2,3)^2*E2(1,2)+2*E1(2,1)*E1(2,2)*E2(1,1)+2*E1(1,1)*E1(3,2)*E2(3,1);
Mcoefs(16) = E1(2,3)^2*E2(2,2)-2*E1(3,1)*E1(2,2)*E2(3,1)+2*E1(1,1)*E1(2,1)*E2(1,2)+2*E1(1,2)*E1(2,3)*E2(1,3)+2*E1(3,2)*E1(2,3)*E2(3,3)+2*E1(3,2)*E1(2,2)*E2(3,2)-2*E1(1,1)*E1(2,2)*E2(1,1)+2*E1(1,1)*E1(1,2)*E2(2,1)-2*E1(2,2)*E1(1,3)*E2(1,3)+2*E1(2,3)*E1(3,3)*E2(3,2)+2*E1(2,1)*E1(3,2)*E2(3,1)+2*E1(1,2)*E1(1,3)*E2(2,3)+2*E1(1,3)*E1(2,3)*E2(1,2)+2*E1(2,1)*E1(2,2)*E2(2,1)+2*E1(1,2)*E1(2,2)*E2(1,2)+2*E1(3,1)*E1(3,2)*E2(2,1)-2*E1(2,2)*E1(3,3)*E2(3,3)+2*E1(2,1)*E1(1,2)*E2(1,1)+2*E1(2,1)*E1(3,1)*E2(3,2)+2*E1(2,2)*E1(2,3)*E2(2,3)+2*E1(3,2)*E1(3,3)*E2(2,3)+E1(3,2)^2*E2(2,2)-E1(1,1)^2*E2(2,2)-E1(3,1)^2*E2(2,2)-E1(1,3)^2*E2(2,2)+3*E1(2,2)^2*E2(2,2)-E1(3,3)^2*E2(2,2)+E1(2,1)^2*E2(2,2)+E1(1,2)^2*E2(2,2);
Mcoefs(17) = E1(3,1)^2*E2(3,2)-E1(1,1)^2*E2(3,2)+3*E1(3,2)^2*E2(3,2)-E1(2,1)^2*E2(3,2)-E1(1,3)^2*E2(3,2)-E1(2,3)^2*E2(3,2)+E1(1,2)^2*E2(3,2)+E1(3,3)^2*E2(3,2)+E1(2,2)^2*E2(3,2)+2*E1(1,2)*E1(1,3)*E2(3,3)+2*E1(1,2)*E1(3,2)*E2(1,2)-2*E1(1,1)*E1(3,2)*E2(1,1)+2*E1(1,3)*E1(3,3)*E2(1,2)+2*E1(2,3)*E1(3,3)*E2(2,2)+2*E1(2,2)*E1(2,3)*E2(3,3)+2*E1(1,2)*E1(3,3)*E2(1,3)-2*E1(3,2)*E1(2,3)*E2(2,3)+2*E1(3,2)*E1(3,3)*E2(3,3)-2*E1(3,2)*E1(1,3)*E2(1,3)+2*E1(1,1)*E1(3,1)*E2(1,2)+2*E1(2,1)*E1(2,2)*E2(3,1)+2*E1(2,1)*E1(3,1)*E2(2,2)-2*E1(2,1)*E1(3,2)*E2(2,1)+2*E1(3,2)*E1(2,2)*E2(2,2)+2*E1(1,1)*E1(1,2)*E2(3,1)+2*E1(3,1)*E1(1,2)*E2(1,1)+2*E1(2,2)*E1(3,3)*E2(2,3)+2*E1(3,1)*E1(2,2)*E2(2,1)+2*E1(3,1)*E1(3,2)*E2(3,1);
Mcoefs(18) = 2*E1(3,1)*E1(3,3)*E2(1,1)+2*E1(1,1)*E1(2,1)*E2(2,3)-2*E1(2,2)*E1(1,3)*E2(2,2)+2*E1(1,1)*E1(1,3)*E2(1,1)+2*E1(1,1)*E1(2,3)*E2(2,1)+2*E1(2,1)*E1(2,3)*E2(1,1)+2*E1(1,1)*E1(3,3)*E2(3,1)+2*E1(1,3)*E1(3,3)*E2(3,3)+2*E1(1,2)*E1(2,3)*E2(2,2)+E1(1,1)^2*E2(1,3)+E1(1,2)^2*E2(1,3)-E1(2,2)^2*E2(1,3)+E1(3,3)^2*E2(1,3)-E1(3,1)^2*E2(1,3)+2*E1(1,2)*E1(3,3)*E2(3,2)+2*E1(3,2)*E1(3,3)*E2(1,2)+2*E1(2,2)*E1(2,3)*E2(1,2)-2*E1(3,1)*E1(1,3)*E2(3,1)+2*E1(1,2)*E1(2,2)*E2(2,3)-2*E1(3,2)*E1(1,3)*E2(3,2)+2*E1(1,3)*E1(2,3)*E2(2,3)+2*E1(1,2)*E1(3,2)*E2(3,3)+2*E1(1,2)*E1(1,3)*E2(1,2)+2*E1(1,1)*E1(3,1)*E2(3,3)-2*E1(2,1)*E1(1,3)*E2(2,1)-E1(2,1)^2*E2(1,3)+3*E1(1,3)^2*E2(1,3)+E1(2,3)^2*E2(1,3)-E1(3,2)^2*E2(1,3);
Mcoefs(19) = -E1(1,2)^2*E2(2,3)+E1(3,3)^2*E2(2,3)+E1(2,1)^2*E2(2,3)-E1(3,1)^2*E2(2,3)+3*E1(2,3)^2*E2(2,3)-E1(3,2)^2*E2(2,3)-E1(1,1)^2*E2(2,3)+E1(2,2)^2*E2(2,3)+E1(1,3)^2*E2(2,3)+2*E1(2,2)*E1(2,3)*E2(2,2)+2*E1(1,3)*E1(2,3)*E2(1,3)+2*E1(2,1)*E1(2,3)*E2(2,1)+2*E1(2,2)*E1(3,3)*E2(3,2)+2*E1(3,2)*E1(3,3)*E2(2,2)-2*E1(1,2)*E1(2,3)*E2(1,2)+2*E1(1,2)*E1(1,3)*E2(2,2)+2*E1(2,2)*E1(1,3)*E2(1,2)+2*E1(1,1)*E1(2,1)*E2(1,3)+2*E1(1,1)*E1(1,3)*E2(2,1)-2*E1(1,1)*E1(2,3)*E2(1,1)+2*E1(2,3)*E1(3,3)*E2(3,3)+2*E1(2,1)*E1(1,3)*E2(1,1)+2*E1(2,1)*E1(3,1)*E2(3,3)+2*E1(1,2)*E1(2,2)*E2(1,3)-2*E1(3,1)*E1(2,3)*E2(3,1)-2*E1(3,2)*E1(2,3)*E2(3,2)+2*E1(3,1)*E1(3,3)*E2(2,1)+2*E1(2,1)*E1(3,3)*E2(3,1)+2*E1(3,2)*E1(2,2)*E2(3,3);
Mcoefs(20) = 2*E1(3,1)*E1(3,3)*E2(3,1)+2*E1(2,1)*E1(2,3)*E2(3,1)+E1(2,3)^2*E2(3,3)+2*E1(3,2)*E1(1,3)*E2(1,2)-2*E1(2,2)*E1(3,3)*E2(2,2)+2*E1(2,2)*E1(2,3)*E2(3,2)+2*E1(3,2)*E1(2,3)*E2(2,2)+2*E1(1,3)*E1(3,3)*E2(1,3)+2*E1(3,1)*E1(2,3)*E2(2,1)+2*E1(1,1)*E1(1,3)*E2(3,1)+2*E1(3,1)*E1(1,3)*E2(1,1)+2*E1(1,1)*E1(3,1)*E2(1,3)+2*E1(1,2)*E1(3,2)*E2(1,3)-2*E1(1,1)*E1(3,3)*E2(1,1)+2*E1(2,1)*E1(3,1)*E2(2,3)+2*E1(1,2)*E1(1,3)*E2(3,2)+2*E1(2,3)*E1(3,3)*E2(2,3)-E1(2,2)^2*E2(3,3)-E1(1,1)^2*E2(3,3)-E1(1,2)^2*E2(3,3)-E1(2,1)^2*E2(3,3)+3*E1(3,3)^2*E2(3,3)+E1(3,1)^2*E2(3,3)+E1(1,3)^2*E2(3,3)+E1(3,2)^2*E2(3,3)-2*E1(1,2)*E1(3,3)*E2(1,2)+2*E1(3,2)*E1(2,2)*E2(2,3)+2*E1(3,2)*E1(3,3)*E2(3,2)-2*E1(2,1)*E1(3,3)*E2(2,1);
Mcoefs(21) = -E1(1,1)*E2(3,2)*E2(2,3)-E1(2,3)*E2(1,1)*E2(3,2)-E1(3,2)*E2(1,1)*E2(2,3)+E1(1,1)*E2(2,2)*E2(3,3)+E1(2,3)*E2(3,1)*E2(1,2)+E1(1,3)*E2(2,1)*E2(3,2)+E1(3,3)*E2(1,1)*E2(2,2)+E1(3,2)*E2(2,1)*E2(1,3)-E1(1,2)*E2(2,1)*E2(3,3)-E1(3,3)*E2(2,1)*E2(1,2)+E1(2,2)*E2(1,1)*E2(3,3)+E1(2,1)*E2(3,2)*E2(1,3)+E1(3,1)*E2(1,2)*E2(2,3)-E1(3,1)*E2(2,2)*E2(1,3)-E1(1,3)*E2(3,1)*E2(2,2)-E1(2,2)*E2(3,1)*E2(1,3)+E1(1,2)*E2(3,1)*E2(2,3)-E1(2,1)*E2(1,2)*E2(3,3);
Mcoefs(22) = 3*E1(1,1)*E2(1,1)^2-E1(1,1)*E2(3,3)^2+E1(1,1)*E2(1,3)^2-E1(1,1)*E2(2,3)^2+2*E1(2,1)*E2(1,2)*E2(2,2)+2*E1(1,3)*E2(3,1)*E2(3,3)+2*E1(3,3)*E2(3,1)*E2(1,3)+2*E1(3,1)*E2(1,1)*E2(3,1)+2*E1(2,1)*E2(1,3)*E2(2,3)+2*E1(3,1)*E2(1,2)*E2(3,2)+2*E1(1,3)*E2(1,1)*E2(1,3)-2*E1(2,3)*E2(1,1)*E2(2,3)+2*E1(1,2)*E2(1,1)*E2(1,2)-2*E1(3,2)*E2(1,1)*E2(3,2)-2*E1(3,3)*E2(1,1)*E2(3,3)+2*E1(2,1)*E2(1,1)*E2(2,1)+E1(1,1)*E2(3,1)^2-E1(1,1)*E2(3,2)^2-E1(1,1)*E2(2,2)^2+E1(1,1)*E2(1,2)^2+2*E1(1,2)*E2(2,1)*E2(2,2)+2*E1(2,2)*E2(2,1)*E2(1,2)+E1(1,1)*E2(2,1)^2+2*E1(1,3)*E2(2,1)*E2(2,3)+2*E1(1,2)*E2(3,1)*E2(3,2)+2*E1(3,2)*E2(3,1)*E2(1,2)+2*E1(2,3)*E2(2,1)*E2(1,3)-2*E1(2,2)*E2(1,1)*E2(2,2)+2*E1(3,1)*E2(1,3)*E2(3,3);
Mcoefs(23) = E1(2,1)*E2(2,2)^2+3*E1(2,1)*E2(2,1)^2-E1(2,1)*E2(1,3)^2-E1(2,1)*E2(1,2)^2-E1(2,1)*E2(3,3)^2-E1(2,1)*E2(3,2)^2+E1(2,1)*E2(3,1)^2+E1(2,1)*E2(1,1)^2+E1(2,1)*E2(2,3)^2+2*E1(1,1)*E2(1,3)*E2(2,3)+2*E1(3,1)*E2(2,3)*E2(3,3)+2*E1(2,3)*E2(2,1)*E2(2,3)+2*E1(3,1)*E2(2,1)*E2(3,1)-2*E1(1,2)*E2(2,1)*E2(1,2)+2*E1(1,1)*E2(1,1)*E2(2,1)+2*E1(3,1)*E2(3,2)*E2(2,2)-2*E1(3,3)*E2(2,1)*E2(3,3)+2*E1(3,3)*E2(3,1)*E2(2,3)+2*E1(2,3)*E2(3,1)*E2(3,3)+2*E1(3,2)*E2(3,1)*E2(2,2)+2*E1(2,2)*E2(2,1)*E2(2,2)-2*E1(3,2)*E2(2,1)*E2(3,2)+2*E1(2,3)*E2(1,1)*E2(1,3)+2*E1(1,3)*E2(1,1)*E2(2,3)-2*E1(1,3)*E2(2,1)*E2(1,3)+2*E1(1,2)*E2(1,1)*E2(2,2)+2*E1(2,2)*E2(3,1)*E2(3,2)+2*E1(1,1)*E2(1,2)*E2(2,2)+2*E1(2,2)*E2(1,1)*E2(1,2);
Mcoefs(24) = -E1(3,1)*E2(2,2)^2-E1(3,1)*E2(1,3)^2+E1(3,1)*E2(2,1)^2-E1(3,1)*E2(1,2)^2+E1(3,1)*E2(3,3)^2+E1(3,1)*E2(1,1)^2-E1(3,1)*E2(2,3)^2+3*E1(3,1)*E2(3,1)^2+E1(3,1)*E2(3,2)^2-2*E1(1,3)*E2(3,1)*E2(1,3)+2*E1(3,2)*E2(2,1)*E2(2,2)-2*E1(2,3)*E2(3,1)*E2(2,3)+2*E1(3,2)*E2(3,1)*E2(3,2)+2*E1(1,1)*E2(1,1)*E2(3,1)-2*E1(1,2)*E2(3,1)*E2(1,2)+2*E1(1,2)*E2(1,1)*E2(3,2)+2*E1(1,3)*E2(1,1)*E2(3,3)+2*E1(2,3)*E2(2,1)*E2(3,3)+2*E1(1,1)*E2(1,2)*E2(3,2)+2*E1(2,2)*E2(2,1)*E2(3,2)+2*E1(2,1)*E2(3,2)*E2(2,2)+2*E1(3,3)*E2(1,1)*E2(1,3)+2*E1(2,1)*E2(2,1)*E2(3,1)+2*E1(2,1)*E2(2,3)*E2(3,3)+2*E1(3,3)*E2(3,1)*E2(3,3)+2*E1(3,3)*E2(2,1)*E2(2,3)-2*E1(2,2)*E2(3,1)*E2(2,2)+2*E1(1,1)*E2(1,3)*E2(3,3)+2*E1(3,2)*E2(1,1)*E2(1,2);
Mcoefs(25) = -E1(1,2)*E2(2,3)^2+E1(1,2)*E2(3,2)^2-E1(1,2)*E2(2,1)^2+3*E1(1,2)*E2(1,2)^2+E1(1,2)*E2(2,2)^2+E1(1,2)*E2(1,3)^2-E1(1,2)*E2(3,1)^2-E1(1,2)*E2(3,3)^2+2*E1(3,2)*E2(1,2)*E2(3,2)+2*E1(1,1)*E2(2,1)*E2(2,2)+2*E1(2,1)*E2(1,1)*E2(2,2)+2*E1(1,3)*E2(3,2)*E2(3,3)+2*E1(1,1)*E2(1,1)*E2(1,2)+2*E1(2,2)*E2(1,2)*E2(2,2)+2*E1(3,2)*E2(1,1)*E2(3,1)+2*E1(3,2)*E2(1,3)*E2(3,3)+2*E1(2,2)*E2(1,3)*E2(2,3)+2*E1(1,1)*E2(3,1)*E2(3,2)+2*E1(3,1)*E2(1,1)*E2(3,2)-2*E1(2,3)*E2(1,2)*E2(2,3)+2*E1(1,3)*E2(1,2)*E2(1,3)-2*E1(3,3)*E2(1,2)*E2(3,3)+2*E1(1,3)*E2(2,2)*E2(2,3)+2*E1(2,3)*E2(2,2)*E2(1,3)-2*E1(3,1)*E2(3,1)*E2(1,2)+2*E1(3,3)*E2(3,2)*E2(1,3)-2*E1(2,1)*E2(2,1)*E2(1,2)+2*E1(2,2)*E2(1,1)*E2(2,1)+E1(1,2)*E2(1,1)^2;
Mcoefs(26) = -E1(2,2)*E2(3,3)^2+E1(2,2)*E2(2,1)^2+3*E1(2,2)*E2(2,2)^2+E1(2,2)*E2(1,2)^2-E1(2,2)*E2(3,1)^2+E1(2,2)*E2(2,3)^2-2*E1(3,1)*E2(3,1)*E2(2,2)+2*E1(2,1)*E2(1,1)*E2(1,2)+2*E1(3,3)*E2(3,2)*E2(2,3)+2*E1(2,3)*E2(2,2)*E2(2,3)+2*E1(3,2)*E2(2,1)*E2(3,1)+2*E1(1,2)*E2(1,2)*E2(2,2)+2*E1(2,1)*E2(2,1)*E2(2,2)+2*E1(1,2)*E2(1,3)*E2(2,3)+2*E1(2,1)*E2(3,1)*E2(3,2)-2*E1(3,3)*E2(2,2)*E2(3,3)-E1(2,2)*E2(1,3)^2+2*E1(1,2)*E2(1,1)*E2(2,1)-2*E1(1,1)*E2(1,1)*E2(2,2)-2*E1(1,3)*E2(2,2)*E2(1,3)+2*E1(3,2)*E2(3,2)*E2(2,2)-E1(2,2)*E2(1,1)^2+2*E1(2,3)*E2(3,2)*E2(3,3)+2*E1(3,1)*E2(2,1)*E2(3,2)+2*E1(3,2)*E2(2,3)*E2(3,3)+2*E1(1,3)*E2(1,2)*E2(2,3)+2*E1(2,3)*E2(1,2)*E2(1,3)+2*E1(1,1)*E2(2,1)*E2(1,2)+E1(2,2)*E2(3,2)^2;
Mcoefs(27) = -2*E1(2,3)*E2(3,2)*E2(2,3)-2*E1(2,1)*E2(2,1)*E2(3,2)+2*E1(2,1)*E2(3,1)*E2(2,2)+2*E1(3,1)*E2(2,1)*E2(2,2)+2*E1(2,2)*E2(2,1)*E2(3,1)-2*E1(1,3)*E2(3,2)*E2(1,3)-2*E1(1,1)*E2(1,1)*E2(3,2)+2*E1(3,1)*E2(3,1)*E2(3,2)+2*E1(2,3)*E2(2,2)*E2(3,3)+2*E1(1,1)*E2(3,1)*E2(1,2)+2*E1(3,3)*E2(3,2)*E2(3,3)+2*E1(1,2)*E2(1,1)*E2(3,1)+2*E1(1,2)*E2(1,2)*E2(3,2)+2*E1(3,3)*E2(2,2)*E2(2,3)+2*E1(1,2)*E2(1,3)*E2(3,3)+2*E1(3,1)*E2(1,1)*E2(1,2)+2*E1(2,2)*E2(3,2)*E2(2,2)+2*E1(3,3)*E2(1,2)*E2(1,3)+2*E1(2,2)*E2(2,3)*E2(3,3)+2*E1(1,3)*E2(1,2)*E2(3,3)+E1(3,2)*E2(2,2)^2-E1(3,2)*E2(2,3)^2+E1(3,2)*E2(3,1)^2-E1(3,2)*E2(2,1)^2+E1(3,2)*E2(3,3)^2+E1(3,2)*E2(1,2)^2-E1(3,2)*E2(1,3)^2-E1(3,2)*E2(1,1)^2+3*E1(3,2)*E2(3,2)^2;
Mcoefs(28) = 2*E1(3,1)*E2(1,1)*E2(3,3)-2*E1(2,2)*E2(2,2)*E2(1,3)+2*E1(1,1)*E2(2,1)*E2(2,3)+2*E1(2,3)*E2(1,1)*E2(2,1)+2*E1(3,3)*E2(1,1)*E2(3,1)+2*E1(3,2)*E2(1,2)*E2(3,3)+2*E1(1,2)*E2(3,2)*E2(3,3)+2*E1(1,1)*E2(1,1)*E2(1,3)+2*E1(2,3)*E2(1,3)*E2(2,3)+2*E1(3,3)*E2(1,2)*E2(3,2)+2*E1(3,3)*E2(1,3)*E2(3,3)-2*E1(3,1)*E2(3,1)*E2(1,3)-2*E1(3,2)*E2(3,2)*E2(1,3)+2*E1(1,2)*E2(1,2)*E2(1,3)+2*E1(2,3)*E2(1,2)*E2(2,2)-2*E1(2,1)*E2(2,1)*E2(1,3)-E1(1,3)*E2(2,1)^2+3*E1(1,3)*E2(1,3)^2-E1(1,3)*E2(2,2)^2-E1(1,3)*E2(3,1)^2+E1(1,3)*E2(1,1)^2+E1(1,3)*E2(1,2)^2-E1(1,3)*E2(3,2)^2+E1(1,3)*E2(2,3)^2+E1(1,3)*E2(3,3)^2+2*E1(1,2)*E2(2,2)*E2(2,3)+2*E1(2,2)*E2(1,2)*E2(2,3)+2*E1(2,1)*E2(1,1)*E2(2,3)+2*E1(1,1)*E2(3,1)*E2(3,3);
Mcoefs(29) = 2*E1(1,3)*E2(1,2)*E2(2,2)+2*E1(3,3)*E2(2,3)*E2(3,3)+2*E1(2,2)*E2(2,2)*E2(2,3)+2*E1(2,1)*E2(2,1)*E2(2,3)+2*E1(1,1)*E2(2,1)*E2(1,3)+2*E1(2,1)*E2(1,1)*E2(1,3)+2*E1(2,2)*E2(3,2)*E2(3,3)+2*E1(3,2)*E2(2,2)*E2(3,3)+2*E1(3,3)*E2(2,1)*E2(3,1)+2*E1(2,1)*E2(3,1)*E2(3,3)-2*E1(3,1)*E2(3,1)*E2(2,3)-2*E1(1,1)*E2(1,1)*E2(2,3)-2*E1(1,2)*E2(1,2)*E2(2,3)+2*E1(1,3)*E2(1,3)*E2(2,3)+2*E1(3,3)*E2(3,2)*E2(2,2)+2*E1(1,2)*E2(2,2)*E2(1,3)+2*E1(2,2)*E2(1,2)*E2(1,3)-2*E1(3,2)*E2(3,2)*E2(2,3)+2*E1(3,1)*E2(2,1)*E2(3,3)+2*E1(1,3)*E2(1,1)*E2(2,1)+E1(2,3)*E2(1,3)^2+E1(2,3)*E2(2,2)^2+E1(2,3)*E2(3,3)^2+E1(2,3)*E2(2,1)^2+3*E1(2,3)*E2(2,3)^2-E1(2,3)*E2(1,2)^2-E1(2,3)*E2(3,1)^2-E1(2,3)*E2(3,2)^2-E1(2,3)*E2(1,1)^2;
Mcoefs(30) = E1(3,3)*E2(1,3)^2-E1(3,3)*E2(1,1)^2+E1(3,3)*E2(3,2)^2-E1(3,3)*E2(1,2)^2+3*E1(3,3)*E2(3,3)^2-E1(3,3)*E2(2,1)^2-E1(3,3)*E2(2,2)^2+E1(3,3)*E2(3,1)^2+E1(3,3)*E2(2,3)^2+2*E1(1,1)*E2(3,1)*E2(1,3)+2*E1(3,1)*E2(1,1)*E2(1,3)-2*E1(2,1)*E2(2,1)*E2(3,3)+2*E1(1,3)*E2(1,1)*E2(3,1)+2*E1(1,3)*E2(1,2)*E2(3,2)+2*E1(2,3)*E2(2,1)*E2(3,1)-2*E1(1,1)*E2(1,1)*E2(3,3)+2*E1(1,2)*E2(3,2)*E2(1,3)+2*E1(3,2)*E2(1,2)*E2(1,3)-2*E1(2,2)*E2(2,2)*E2(3,3)+2*E1(1,3)*E2(1,3)*E2(3,3)+2*E1(3,2)*E2(2,2)*E2(2,3)+2*E1(2,3)*E2(2,3)*E2(3,3)-2*E1(1,2)*E2(1,2)*E2(3,3)+2*E1(3,2)*E2(3,2)*E2(3,3)+2*E1(3,1)*E2(3,1)*E2(3,3)+2*E1(3,1)*E2(2,1)*E2(2,3)+2*E1(2,2)*E2(3,2)*E2(2,3)+2*E1(2,3)*E2(3,2)*E2(2,2)+2*E1(2,1)*E2(3,1)*E2(2,3);
Mcoefs(31) = E2(1,1)*E2(2,2)*E2(3,3)+E2(2,1)*E2(3,2)*E2(1,3)+E2(3,1)*E2(1,2)*E2(2,3)-E2(2,1)*E2(1,2)*E2(3,3)-E2(1,1)*E2(3,2)*E2(2,3)-E2(3,1)*E2(2,2)*E2(1,3);
Mcoefs(32) = E2(1,1)*E2(2,1)^2+2*E2(2,1)*E2(1,2)*E2(2,2)+E2(1,1)^3+2*E2(2,1)*E2(1,3)*E2(2,3)+2*E2(3,1)*E2(1,3)*E2(3,3)+E2(1,1)*E2(1,3)^2-E2(1,1)*E2(2,2)^2-E2(1,1)*E2(2,3)^2-E2(1,1)*E2(3,2)^2-E2(1,1)*E2(3,3)^2+E2(1,1)*E2(1,2)^2+E2(1,1)*E2(3,1)^2+2*E2(3,1)*E2(1,2)*E2(3,2);
Mcoefs(33) = E2(2,1)^3+E2(2,1)*E2(2,3)^2+2*E2(3,1)*E2(3,2)*E2(2,2)+2*E2(3,1)*E2(2,3)*E2(3,3)+2*E2(1,1)*E2(1,3)*E2(2,3)+2*E2(1,1)*E2(1,2)*E2(2,2)+E2(2,1)*E2(2,2)^2-E2(2,1)*E2(1,3)^2-E2(2,1)*E2(1,2)^2-E2(2,1)*E2(3,3)^2-E2(2,1)*E2(3,2)^2+E2(2,1)*E2(3,1)^2+E2(1,1)^2*E2(2,1);
Mcoefs(34) = 2*E2(1,1)*E2(1,2)*E2(3,2)+2*E2(2,1)*E2(3,2)*E2(2,2)+2*E2(1,1)*E2(1,3)*E2(3,3)+2*E2(2,1)*E2(2,3)*E2(3,3)+E2(3,1)*E2(3,3)^2+E2(3,1)*E2(3,2)^2+E2(2,1)^2*E2(3,1)-E2(3,1)*E2(2,3)^2-E2(3,1)*E2(1,2)^2-E2(3,1)*E2(2,2)^2-E2(3,1)*E2(1,3)^2+E2(1,1)^2*E2(3,1)+E2(3,1)^3;
Mcoefs(35) = -E2(1,2)*E2(2,3)^2-E2(3,1)^2*E2(1,2)-E2(1,2)*E2(3,3)^2+E2(1,1)^2*E2(1,2)+E2(1,2)*E2(3,2)^2+E2(1,2)*E2(2,2)^2+E2(1,2)*E2(1,3)^2-E2(2,1)^2*E2(1,2)+2*E2(2,2)*E2(1,3)*E2(2,3)+E2(1,2)^3+2*E2(1,1)*E2(3,1)*E2(3,2)+2*E2(3,2)*E2(1,3)*E2(3,3)+2*E2(1,1)*E2(2,1)*E2(2,2);
Mcoefs(36) = E2(1,2)^2*E2(2,2)-E2(1,1)^2*E2(2,2)-E2(3,1)^2*E2(2,2)-E2(2,2)*E2(1,3)^2-E2(2,2)*E2(3,3)^2+E2(2,1)^2*E2(2,2)+E2(2,2)*E2(2,3)^2+E2(3,2)^2*E2(2,2)+2*E2(3,2)*E2(2,3)*E2(3,3)+E2(2,2)^3+2*E2(2,1)*E2(3,1)*E2(3,2)+2*E2(1,2)*E2(1,3)*E2(2,3)+2*E2(1,1)*E2(2,1)*E2(1,2);
Mcoefs(37) = E2(1,2)^2*E2(3,2)+E2(3,1)^2*E2(3,2)+E2(3,2)*E2(2,2)^2-E2(2,1)^2*E2(3,2)-E2(3,2)*E2(2,3)^2+E2(3,2)*E2(3,3)^2-E2(3,2)*E2(1,3)^2-E2(1,1)^2*E2(3,2)+E2(3,2)^3+2*E2(1,1)*E2(3,1)*E2(1,2)+2*E2(1,2)*E2(1,3)*E2(3,3)+2*E2(2,1)*E2(3,1)*E2(2,2)+2*E2(2,2)*E2(2,3)*E2(3,3);
Mcoefs(38) = E2(1,3)*E2(3,3)^2+E2(1,3)*E2(2,3)^2-E2(2,2)^2*E2(1,3)-E2(3,1)^2*E2(1,3)-E2(2,1)^2*E2(1,3)-E2(3,2)^2*E2(1,3)+E2(1,2)^2*E2(1,3)+E2(1,1)^2*E2(1,3)+2*E2(1,1)*E2(3,1)*E2(3,3)+E2(1,3)^3+2*E2(1,2)*E2(2,2)*E2(2,3)+2*E2(1,2)*E2(3,2)*E2(3,3)+2*E2(1,1)*E2(2,1)*E2(2,3);
Mcoefs(39) = E2(2,1)^2*E2(2,3)+E2(2,2)^2*E2(2,3)-E2(1,2)^2*E2(2,3)-E2(3,1)^2*E2(2,3)-E2(3,2)^2*E2(2,3)-E2(1,1)^2*E2(2,3)+E2(1,3)^2*E2(2,3)+E2(2,3)*E2(3,3)^2+E2(2,3)^3+2*E2(3,2)*E2(2,2)*E2(3,3)+2*E2(1,2)*E2(2,2)*E2(1,3)+2*E2(2,1)*E2(3,1)*E2(3,3)+2*E2(1,1)*E2(2,1)*E2(1,3);
Mcoefs(40) = E2(3,3)^3-E2(2,1)^2*E2(3,3)-E2(1,2)^2*E2(3,3)-E2(1,1)^2*E2(3,3)-E2(2,2)^2*E2(3,3)+E2(2,3)^2*E2(3,3)+2*E2(1,1)*E2(3,1)*E2(1,3)+E2(3,2)^2*E2(3,3)+2*E2(2,1)*E2(3,1)*E2(2,3)+2*E2(3,2)*E2(2,2)*E2(2,3)+E2(1,3)^2*E2(3,3)+2*E2(1,2)*E2(3,2)*E2(1,3)+E2(3,1)^2*E2(3,3);
Mcoefs(41) = -E1(1,1)*E1(2,3)*E3(3,2)-E1(1,1)*E1(3,2)*E3(2,3)-E1(3,2)*E1(2,3)*E3(1,1)-E1(2,1)*E1(1,2)*E3(3,3)-E1(2,1)*E1(3,3)*E3(1,2)-E1(1,2)*E1(3,3)*E3(2,1)+E1(1,1)*E1(2,2)*E3(3,3)+E1(1,1)*E1(3,3)*E3(2,2)+E1(2,2)*E1(3,3)*E3(1,1)+E1(2,1)*E1(1,3)*E3(3,2)+E1(2,1)*E1(3,2)*E3(1,3)+E1(3,2)*E1(1,3)*E3(2,1)-E1(3,1)*E1(1,3)*E3(2,2)-E1(3,1)*E1(2,2)*E3(1,3)-E1(2,2)*E1(1,3)*E3(3,1)+E1(3,1)*E1(1,2)*E3(2,3)+E1(3,1)*E1(2,3)*E3(1,2)+E1(1,2)*E1(2,3)*E3(3,1);
Mcoefs(42) = E1(1,2)^2*E3(1,1)-E1(3,3)^2*E3(1,1)-E1(2,2)^2*E3(1,1)+3*E1(1,1)^2*E3(1,1)-E1(3,2)^2*E3(1,1)-E1(2,3)^2*E3(1,1)+E1(1,3)^2*E3(1,1)+E1(2,1)^2*E3(1,1)+E1(3,1)^2*E3(1,1)+2*E1(1,1)*E1(2,1)*E3(2,1)+2*E1(2,1)*E1(1,2)*E3(2,2)+2*E1(2,1)*E1(2,2)*E3(1,2)+2*E1(1,1)*E1(3,1)*E3(3,1)+2*E1(1,3)*E1(2,3)*E3(2,1)+2*E1(2,1)*E1(1,3)*E3(2,3)+2*E1(3,1)*E1(1,2)*E3(3,2)+2*E1(2,1)*E1(2,3)*E3(1,3)+2*E1(1,2)*E1(2,2)*E3(2,1)+2*E1(1,1)*E1(1,3)*E3(1,3)-2*E1(1,1)*E1(2,3)*E3(2,3)+2*E1(3,1)*E1(1,3)*E3(3,3)+2*E1(3,1)*E1(3,3)*E3(1,3)-2*E1(1,1)*E1(2,2)*E3(2,2)+2*E1(1,2)*E1(3,2)*E3(3,1)-2*E1(1,1)*E1(3,2)*E3(3,2)-2*E1(1,1)*E1(3,3)*E3(3,3)+2*E1(3,1)*E1(3,2)*E3(1,2)+2*E1(1,3)*E1(3,3)*E3(3,1)+2*E1(1,1)*E1(1,2)*E3(1,2);
Mcoefs(43) = -E1(1,3)^2*E3(2,1)-E1(1,2)^2*E3(2,1)-E1(3,2)^2*E3(2,1)-E1(3,3)^2*E3(2,1)+E1(3,1)^2*E3(2,1)+E1(1,1)^2*E3(2,1)+3*E1(2,1)^2*E3(2,1)+E1(2,2)^2*E3(2,1)+E1(2,3)^2*E3(2,1)+2*E1(1,1)*E1(1,3)*E3(2,3)+2*E1(2,1)*E1(2,2)*E3(2,2)+2*E1(2,1)*E1(2,3)*E3(2,3)+2*E1(2,3)*E1(3,3)*E3(3,1)-2*E1(2,1)*E1(3,3)*E3(3,3)+2*E1(3,1)*E1(3,3)*E3(2,3)+2*E1(3,1)*E1(2,3)*E3(3,3)+2*E1(1,1)*E1(2,1)*E3(1,1)+2*E1(2,1)*E1(3,1)*E3(3,1)-2*E1(2,1)*E1(3,2)*E3(3,2)+2*E1(1,3)*E1(2,3)*E3(1,1)+2*E1(3,1)*E1(2,2)*E3(3,2)+2*E1(1,1)*E1(1,2)*E3(2,2)-2*E1(2,1)*E1(1,2)*E3(1,2)+2*E1(1,1)*E1(2,3)*E3(1,3)+2*E1(1,1)*E1(2,2)*E3(1,2)+2*E1(3,1)*E1(3,2)*E3(2,2)+2*E1(3,2)*E1(2,2)*E3(3,1)+2*E1(1,2)*E1(2,2)*E3(1,1)-2*E1(2,1)*E1(1,3)*E3(1,3);
Mcoefs(44) = 2*E1(1,1)*E1(1,2)*E3(3,2)+2*E1(2,1)*E1(2,2)*E3(3,2)+2*E1(1,3)*E1(3,3)*E3(1,1)+2*E1(2,1)*E1(3,1)*E3(2,1)-2*E1(3,1)*E1(1,2)*E3(1,2)+2*E1(3,2)*E1(2,2)*E3(2,1)+2*E1(1,1)*E1(1,3)*E3(3,3)+2*E1(1,1)*E1(3,3)*E3(1,3)+2*E1(3,1)*E1(3,2)*E3(3,2)+2*E1(2,1)*E1(3,3)*E3(2,3)+2*E1(1,2)*E1(3,2)*E3(1,1)+2*E1(2,1)*E1(3,2)*E3(2,2)-2*E1(3,1)*E1(2,3)*E3(2,3)-2*E1(3,1)*E1(2,2)*E3(2,2)+2*E1(1,1)*E1(3,1)*E3(1,1)+2*E1(1,1)*E1(3,2)*E3(1,2)+2*E1(2,1)*E1(2,3)*E3(3,3)+2*E1(2,3)*E1(3,3)*E3(2,1)-2*E1(3,1)*E1(1,3)*E3(1,3)+2*E1(3,1)*E1(3,3)*E3(3,3)+E1(1,1)^2*E3(3,1)-E1(2,3)^2*E3(3,1)+E1(3,2)^2*E3(3,1)-E1(2,2)^2*E3(3,1)-E1(1,2)^2*E3(3,1)-E1(1,3)^2*E3(3,1)+3*E1(3,1)^2*E3(3,1)+E1(2,1)^2*E3(3,1)+E1(3,3)^2*E3(3,1);
Mcoefs(45) = 2*E1(1,2)*E1(2,2)*E3(2,2)+2*E1(3,2)*E1(3,3)*E3(1,3)+2*E1(1,2)*E1(3,2)*E3(3,2)+2*E1(1,2)*E1(1,3)*E3(1,3)-2*E1(1,2)*E1(2,3)*E3(2,3)-2*E1(3,1)*E1(1,2)*E3(3,1)+2*E1(1,1)*E1(2,2)*E3(2,1)+2*E1(2,1)*E1(2,2)*E3(1,1)+2*E1(1,1)*E1(2,1)*E3(2,2)+2*E1(2,2)*E1(2,3)*E3(1,3)-2*E1(2,1)*E1(1,2)*E3(2,1)+2*E1(1,1)*E1(3,1)*E3(3,2)+2*E1(3,1)*E1(3,2)*E3(1,1)+2*E1(1,1)*E1(3,2)*E3(3,1)+2*E1(1,3)*E1(3,3)*E3(3,2)-2*E1(1,2)*E1(3,3)*E3(3,3)+2*E1(1,1)*E1(1,2)*E3(1,1)+E1(1,3)^2*E3(1,2)-E1(2,1)^2*E3(1,2)+3*E1(1,2)^2*E3(1,2)-E1(2,3)^2*E3(1,2)+E1(2,2)^2*E3(1,2)+E1(1,1)^2*E3(1,2)-E1(3,1)^2*E3(1,2)-E1(3,3)^2*E3(1,2)+2*E1(2,2)*E1(1,3)*E3(2,3)+2*E1(3,2)*E1(1,3)*E3(3,3)+2*E1(1,3)*E1(2,3)*E3(2,2)+E1(3,2)^2*E3(1,2);
Mcoefs(46) = E1(2,1)^2*E3(2,2)+E1(1,2)^2*E3(2,2)-E1(1,1)^2*E3(2,2)-E1(1,3)^2*E3(2,2)-E1(3,1)^2*E3(2,2)+3*E1(2,2)^2*E3(2,2)+E1(3,2)^2*E3(2,2)+E1(2,3)^2*E3(2,2)+2*E1(2,3)*E1(3,3)*E3(3,2)+2*E1(1,3)*E1(2,3)*E3(1,2)-2*E1(2,2)*E1(1,3)*E3(1,3)+2*E1(3,2)*E1(2,2)*E3(3,2)+2*E1(3,2)*E1(2,3)*E3(3,3)-E1(3,3)^2*E3(2,2)+2*E1(1,2)*E1(2,2)*E3(1,2)-2*E1(3,1)*E1(2,2)*E3(3,1)-2*E1(1,1)*E1(2,2)*E3(1,1)+2*E1(2,2)*E1(2,3)*E3(2,3)+2*E1(1,2)*E1(2,3)*E3(1,3)+2*E1(3,1)*E1(3,2)*E3(2,1)+2*E1(1,1)*E1(2,1)*E3(1,2)+2*E1(1,2)*E1(1,3)*E3(2,3)+2*E1(1,1)*E1(1,2)*E3(2,1)+2*E1(2,1)*E1(3,1)*E3(3,2)+2*E1(2,1)*E1(3,2)*E3(3,1)+2*E1(3,2)*E1(3,3)*E3(2,3)+2*E1(2,1)*E1(2,2)*E3(2,1)-2*E1(2,2)*E1(3,3)*E3(3,3)+2*E1(2,1)*E1(1,2)*E3(1,1);
Mcoefs(47) = E1(3,3)^2*E3(3,2)-E1(2,3)^2*E3(3,2)+E1(2,2)^2*E3(3,2)-E1(2,1)^2*E3(3,2)+E1(3,1)^2*E3(3,2)-E1(1,3)^2*E3(3,2)+3*E1(3,2)^2*E3(3,2)-E1(1,1)^2*E3(3,2)+2*E1(3,2)*E1(2,2)*E3(2,2)+2*E1(2,1)*E1(3,1)*E3(2,2)+2*E1(1,2)*E1(3,2)*E3(1,2)+2*E1(1,1)*E1(3,1)*E3(1,2)+2*E1(1,1)*E1(1,2)*E3(3,1)+2*E1(2,2)*E1(2,3)*E3(3,3)+2*E1(2,2)*E1(3,3)*E3(2,3)+2*E1(1,3)*E1(3,3)*E3(1,2)-2*E1(3,2)*E1(2,3)*E3(2,3)-2*E1(2,1)*E1(3,2)*E3(2,1)+E1(1,2)^2*E3(3,2)+2*E1(1,2)*E1(3,3)*E3(1,3)-2*E1(3,2)*E1(1,3)*E3(1,3)+2*E1(3,1)*E1(1,2)*E3(1,1)+2*E1(3,2)*E1(3,3)*E3(3,3)+2*E1(2,1)*E1(2,2)*E3(3,1)+2*E1(3,1)*E1(2,2)*E3(2,1)-2*E1(1,1)*E1(3,2)*E3(1,1)+2*E1(3,1)*E1(3,2)*E3(3,1)+2*E1(1,2)*E1(1,3)*E3(3,3)+2*E1(2,3)*E1(3,3)*E3(2,2);
Mcoefs(48) = E1(1,2)^2*E3(1,3)+E1(1,1)^2*E3(1,3)+E1(3,3)^2*E3(1,3)-E1(2,1)^2*E3(1,3)-E1(3,2)^2*E3(1,3)+E1(2,3)^2*E3(1,3)-E1(3,1)^2*E3(1,3)+3*E1(1,3)^2*E3(1,3)-2*E1(2,1)*E1(1,3)*E3(2,1)+2*E1(2,2)*E1(2,3)*E3(1,2)-E1(2,2)^2*E3(1,3)-2*E1(2,2)*E1(1,3)*E3(2,2)+2*E1(1,1)*E1(2,1)*E3(2,3)+2*E1(1,2)*E1(1,3)*E3(1,2)+2*E1(1,1)*E1(1,3)*E3(1,1)+2*E1(1,1)*E1(3,1)*E3(3,3)+2*E1(3,2)*E1(3,3)*E3(1,2)-2*E1(3,1)*E1(1,3)*E3(3,1)+2*E1(1,3)*E1(3,3)*E3(3,3)+2*E1(1,2)*E1(2,2)*E3(2,3)+2*E1(1,2)*E1(2,3)*E3(2,2)-2*E1(3,2)*E1(1,3)*E3(3,2)+2*E1(1,3)*E1(2,3)*E3(2,3)+2*E1(1,2)*E1(3,2)*E3(3,3)+2*E1(1,1)*E1(2,3)*E3(2,1)+2*E1(2,1)*E1(2,3)*E3(1,1)+2*E1(1,1)*E1(3,3)*E3(3,1)+2*E1(1,2)*E1(3,3)*E3(3,2)+2*E1(3,1)*E1(3,3)*E3(1,1);
Mcoefs(49) = -E1(3,2)^2*E3(2,3)-E1(3,1)^2*E3(2,3)+E1(2,1)^2*E3(2,3)+E1(2,2)^2*E3(2,3)+E1(1,3)^2*E3(2,3)+3*E1(2,3)^2*E3(2,3)-E1(1,2)^2*E3(2,3)+E1(3,3)^2*E3(2,3)-E1(1,1)^2*E3(2,3)+2*E1(1,2)*E1(2,2)*E3(1,3)+2*E1(1,1)*E1(2,1)*E3(1,3)-2*E1(1,1)*E1(2,3)*E3(1,1)+2*E1(2,3)*E1(3,3)*E3(3,3)+2*E1(1,1)*E1(1,3)*E3(2,1)+2*E1(2,1)*E1(2,3)*E3(2,1)-2*E1(3,2)*E1(2,3)*E3(3,2)+2*E1(3,1)*E1(3,3)*E3(2,1)+2*E1(2,1)*E1(3,3)*E3(3,1)-2*E1(3,1)*E1(2,3)*E3(3,1)+2*E1(2,2)*E1(3,3)*E3(3,2)+2*E1(3,2)*E1(3,3)*E3(2,2)+2*E1(1,2)*E1(1,3)*E3(2,2)-2*E1(1,2)*E1(2,3)*E3(1,2)+2*E1(1,3)*E1(2,3)*E3(1,3)+2*E1(2,1)*E1(3,1)*E3(3,3)+2*E1(2,2)*E1(1,3)*E3(1,2)+2*E1(2,2)*E1(2,3)*E3(2,2)+2*E1(3,2)*E1(2,2)*E3(3,3)+2*E1(2,1)*E1(1,3)*E3(1,1);
Mcoefs(50) = E1(1,3)^2*E3(3,3)+E1(3,1)^2*E3(3,3)+E1(3,2)^2*E3(3,3)+3*E1(3,3)^2*E3(3,3)-E1(2,2)^2*E3(3,3)-E1(1,1)^2*E3(3,3)-E1(2,1)^2*E3(3,3)-E1(1,2)^2*E3(3,3)+2*E1(1,1)*E1(3,1)*E3(1,3)+2*E1(3,1)*E1(2,3)*E3(2,1)+2*E1(2,2)*E1(2,3)*E3(3,2)+2*E1(2,1)*E1(2,3)*E3(3,1)+2*E1(2,3)*E1(3,3)*E3(2,3)-2*E1(1,2)*E1(3,3)*E3(1,2)+2*E1(1,2)*E1(3,2)*E3(1,3)+2*E1(1,1)*E1(1,3)*E3(3,1)+2*E1(3,1)*E1(1,3)*E3(1,1)-2*E1(2,1)*E1(3,3)*E3(2,1)-2*E1(1,1)*E1(3,3)*E3(1,1)+2*E1(1,3)*E1(3,3)*E3(1,3)+2*E1(2,1)*E1(3,1)*E3(2,3)+2*E1(3,2)*E1(3,3)*E3(3,2)+E1(2,3)^2*E3(3,3)+2*E1(1,2)*E1(1,3)*E3(3,2)+2*E1(3,2)*E1(1,3)*E3(1,2)-2*E1(2,2)*E1(3,3)*E3(2,2)+2*E1(3,2)*E1(2,3)*E3(2,2)+2*E1(3,2)*E1(2,2)*E3(2,3)+2*E1(3,1)*E1(3,3)*E3(3,1);
Mcoefs(51) = -E1(2,2)*E2(3,1)*E3(1,3)-E1(1,3)*E2(3,1)*E3(2,2)-E1(1,3)*E2(2,2)*E3(3,1)-E1(2,2)*E2(1,3)*E3(3,1)+E1(1,2)*E2(2,3)*E3(3,1)+E1(2,3)*E2(1,2)*E3(3,1)+E1(1,1)*E2(2,2)*E3(3,3)+E1(1,3)*E2(3,2)*E3(2,1)+E1(2,1)*E2(1,3)*E3(3,2)+E1(2,3)*E2(3,1)*E3(1,2)-E1(1,2)*E2(3,3)*E3(2,1)-E1(3,3)*E2(1,2)*E3(2,1)+E1(3,2)*E2(2,1)*E3(1,3)-E1(1,1)*E2(2,3)*E3(3,2)-E1(1,1)*E2(3,2)*E3(2,3)-E1(2,3)*E2(1,1)*E3(3,2)-E1(3,2)*E2(1,1)*E3(2,3)+E1(1,3)*E2(2,1)*E3(3,2)+E1(1,1)*E2(3,3)*E3(2,2)+E1(3,2)*E2(1,3)*E3(2,1)+E1(2,1)*E2(3,2)*E3(1,3)+E1(3,1)*E2(2,3)*E3(1,2)+E1(3,1)*E2(1,2)*E3(2,3)+E1(1,2)*E2(3,1)*E3(2,3)+E1(2,2)*E2(3,3)*E3(1,1)+E1(2,2)*E2(1,1)*E3(3,3)-E1(3,2)*E2(2,3)*E3(1,1)-E1(2,1)*E2(1,2)*E3(3,3)-E1(2,1)*E2(3,3)*E3(1,2)-E1(1,2)*E2(2,1)*E3(3,3)-E1(3,3)*E2(2,1)*E3(1,2)-E1(2,3)*E2(3,2)*E3(1,1)+E1(3,3)*E2(2,2)*E3(1,1)+E1(3,3)*E2(1,1)*E3(2,2)-E1(3,1)*E2(1,3)*E3(2,2)-E1(3,1)*E2(2,2)*E3(1,3);
Mcoefs(52) = 2*E1(1,3)*E2(2,3)*E3(2,1)+2*E1(1,3)*E2(2,1)*E3(2,3)+2*E1(2,3)*E2(1,3)*E3(2,1)+2*E1(2,1)*E2(2,3)*E3(1,3)+2*E1(3,1)*E2(1,1)*E3(3,1)+2*E1(3,1)*E2(3,1)*E3(1,1)+2*E1(1,2)*E2(3,1)*E3(3,2)+2*E1(3,2)*E2(1,2)*E3(3,1)+2*E1(1,2)*E2(3,2)*E3(3,1)+2*E1(3,1)*E2(1,2)*E3(3,2)+2*E1(3,2)*E2(3,1)*E3(1,2)+2*E1(3,1)*E2(3,2)*E3(1,2)+2*E1(3,1)*E2(3,3)*E3(1,3)-2*E1(3,2)*E2(3,2)*E3(1,1)-2*E1(3,2)*E2(1,1)*E3(3,2)-2*E1(1,1)*E2(3,2)*E3(3,2)-2*E1(3,3)*E2(3,3)*E3(1,1)-2*E1(3,3)*E2(1,1)*E3(3,3)-2*E1(1,1)*E2(3,3)*E3(3,3)+2*E1(1,1)*E2(2,1)*E3(2,1)+2*E1(2,1)*E2(1,1)*E3(2,1)+2*E1(2,1)*E2(2,1)*E3(1,1)+2*E1(1,2)*E2(2,2)*E3(2,1)+2*E1(1,2)*E2(2,1)*E3(2,2)+2*E1(1,1)*E2(1,2)*E3(1,2)-2*E1(1,1)*E2(2,2)*E3(2,2)-2*E1(1,1)*E2(2,3)*E3(2,3)-2*E1(2,2)*E2(2,2)*E3(1,1)+2*E1(1,1)*E2(3,1)*E3(3,1)-2*E1(2,2)*E2(1,1)*E3(2,2)+2*E1(2,1)*E2(1,3)*E3(2,3)+2*E1(2,3)*E2(2,1)*E3(1,3)+2*E1(1,3)*E2(1,3)*E3(1,1)+2*E1(1,3)*E2(1,1)*E3(1,3)+2*E1(1,1)*E2(1,3)*E3(1,3)-2*E1(2,3)*E2(2,3)*E3(1,1)-2*E1(2,3)*E2(1,1)*E3(2,3)+2*E1(1,3)*E2(3,3)*E3(3,1)+2*E1(1,3)*E2(3,1)*E3(3,3)+2*E1(3,3)*E2(1,3)*E3(3,1)+2*E1(3,1)*E2(1,3)*E3(3,3)+2*E1(3,3)*E2(3,1)*E3(1,3)+6*E1(1,1)*E2(1,1)*E3(1,1)+2*E1(1,2)*E2(1,2)*E3(1,1)+2*E1(1,2)*E2(1,1)*E3(1,2)+2*E1(2,2)*E2(1,2)*E3(2,1)+2*E1(2,1)*E2(1,2)*E3(2,2)+2*E1(2,2)*E2(2,1)*E3(1,2)+2*E1(2,1)*E2(2,2)*E3(1,2);
Mcoefs(53) = 2*E1(1,1)*E2(2,1)*E3(1,1)+2*E1(2,3)*E2(3,3)*E3(3,1)+2*E1(2,3)*E2(3,1)*E3(3,3)+2*E1(3,3)*E2(2,3)*E3(3,1)+2*E1(3,3)*E2(3,1)*E3(2,3)+2*E1(3,1)*E2(2,3)*E3(3,3)+2*E1(3,1)*E2(3,3)*E3(2,3)+2*E1(2,1)*E2(3,1)*E3(3,1)+2*E1(3,1)*E2(2,1)*E3(3,1)+2*E1(2,1)*E2(1,1)*E3(1,1)+2*E1(3,1)*E2(3,1)*E3(2,1)+2*E1(2,2)*E2(3,2)*E3(3,1)+2*E1(2,2)*E2(3,1)*E3(3,2)+2*E1(3,2)*E2(2,2)*E3(3,1)+2*E1(2,2)*E2(1,2)*E3(1,1)+2*E1(1,1)*E2(1,3)*E3(2,3)+2*E1(1,1)*E2(2,3)*E3(1,3)+2*E1(2,3)*E2(1,1)*E3(1,3)+2*E1(2,3)*E2(1,3)*E3(1,1)+6*E1(2,1)*E2(2,1)*E3(2,1)+2*E1(2,2)*E2(2,2)*E3(2,1)+2*E1(2,2)*E2(2,1)*E3(2,2)+2*E1(2,1)*E2(2,2)*E3(2,2)+2*E1(1,2)*E2(2,2)*E3(1,1)-2*E1(3,3)*E2(3,3)*E3(2,1)+2*E1(2,2)*E2(1,1)*E3(1,2)+2*E1(1,1)*E2(1,2)*E3(2,2)+2*E1(1,3)*E2(2,3)*E3(1,1)+2*E1(1,1)*E2(2,2)*E3(1,2)-2*E1(2,1)*E2(1,2)*E3(1,2)-2*E1(1,2)*E2(1,2)*E3(2,1)-2*E1(1,2)*E2(2,1)*E3(1,2)-2*E1(2,1)*E2(3,2)*E3(3,2)-2*E1(3,2)*E2(3,2)*E3(2,1)+2*E1(1,2)*E2(1,1)*E3(2,2)-2*E1(3,2)*E2(2,1)*E3(3,2)-2*E1(2,1)*E2(1,3)*E3(1,3)-2*E1(1,3)*E2(1,3)*E3(2,1)-2*E1(2,1)*E2(3,3)*E3(3,3)-2*E1(3,3)*E2(2,1)*E3(3,3)-2*E1(1,3)*E2(2,1)*E3(1,3)+2*E1(1,3)*E2(1,1)*E3(2,3)+2*E1(2,3)*E2(2,3)*E3(2,1)+2*E1(2,3)*E2(2,1)*E3(2,3)+2*E1(2,1)*E2(2,3)*E3(2,3)+2*E1(1,1)*E2(1,1)*E3(2,1)+2*E1(3,1)*E2(2,2)*E3(3,2)+2*E1(3,1)*E2(3,2)*E3(2,2)+2*E1(3,2)*E2(3,1)*E3(2,2);
Mcoefs(54) = 2*E1(3,3)*E2(3,1)*E3(3,3)+2*E1(3,1)*E2(3,3)*E3(3,3)+2*E1(1,1)*E2(3,2)*E3(1,2)+2*E1(1,3)*E2(3,3)*E3(1,1)+2*E1(2,3)*E2(2,1)*E3(3,3)+2*E1(2,3)*E2(3,3)*E3(2,1)+2*E1(2,1)*E2(2,2)*E3(3,2)+2*E1(3,2)*E2(2,2)*E3(2,1)+2*E1(2,1)*E2(3,2)*E3(2,2)+2*E1(3,2)*E2(2,1)*E3(2,2)+2*E1(3,3)*E2(1,3)*E3(1,1)+2*E1(1,1)*E2(1,3)*E3(3,3)+2*E1(3,1)*E2(1,1)*E3(1,1)+2*E1(1,1)*E2(3,1)*E3(1,1)+2*E1(1,3)*E2(1,1)*E3(3,3)+2*E1(1,2)*E2(3,2)*E3(1,1)-2*E1(1,3)*E2(3,1)*E3(1,3)+2*E1(3,1)*E2(2,1)*E3(2,1)-2*E1(1,3)*E2(1,3)*E3(3,1)+2*E1(1,1)*E2(1,2)*E3(3,2)+2*E1(3,2)*E2(1,2)*E3(1,1)+2*E1(2,2)*E2(3,2)*E3(2,1)+2*E1(2,1)*E2(2,1)*E3(3,1)-2*E1(3,1)*E2(1,2)*E3(1,2)+2*E1(1,1)*E2(1,1)*E3(3,1)+2*E1(2,1)*E2(3,1)*E3(2,1)+6*E1(3,1)*E2(3,1)*E3(3,1)+2*E1(3,3)*E2(1,1)*E3(1,3)+2*E1(1,1)*E2(3,3)*E3(1,3)+2*E1(2,2)*E2(2,1)*E3(3,2)-2*E1(1,2)*E2(3,1)*E3(1,2)-2*E1(2,2)*E2(3,1)*E3(2,2)-2*E1(2,2)*E2(2,2)*E3(3,1)+2*E1(3,2)*E2(3,2)*E3(3,1)-2*E1(3,1)*E2(1,3)*E3(1,3)+2*E1(3,2)*E2(1,1)*E3(1,2)-2*E1(1,2)*E2(1,2)*E3(3,1)-2*E1(2,3)*E2(2,3)*E3(3,1)+2*E1(2,1)*E2(3,3)*E3(2,3)+2*E1(3,2)*E2(3,1)*E3(3,2)+2*E1(3,1)*E2(3,2)*E3(3,2)+2*E1(3,3)*E2(2,1)*E3(2,3)-2*E1(3,1)*E2(2,2)*E3(2,2)+2*E1(3,3)*E2(2,3)*E3(2,1)+2*E1(2,1)*E2(2,3)*E3(3,3)+2*E1(3,3)*E2(3,3)*E3(3,1)-2*E1(3,1)*E2(2,3)*E3(2,3)-2*E1(2,3)*E2(3,1)*E3(2,3)+2*E1(1,2)*E2(1,1)*E3(3,2);
% --- Auto-generated polynomial coefficient assignments (machine-generated; do not hand-edit). ---
% Each Mcoefs(k) is one coefficient of a multivariate polynomial system in the
% entries of the 3x3 matrices E1..E4, indexed as E(row,col).
% NOTE(review): the grouping below (trilinear E1*E2*E3 terms, then E2^2*E3,
% E1*E3^2, E2*E3^2, E3^3, and E1^2*E4 terms) is consistent with expanding
% cubic constraints on a linear matrix combination E = x*E1 + y*E2 + z*E3 + w*E4
% -- e.g. det(E) = 0 and the trace constraint 2*E*E'*E - trace(E*E')*E = 0 from
% a minimal relative-pose solver -- TODO confirm against the generator of this file.
% Coefficients 55-60: terms trilinear in (E1, E2, E3) -- degree 1 in each matrix.
Mcoefs(55) = 2*E1(3,2)*E2(3,1)*E3(1,1)+2*E1(3,2)*E2(3,2)*E3(1,2)+2*E1(3,2)*E2(1,2)*E3(3,2)-2*E1(3,3)*E2(1,2)*E3(3,3)-2*E1(3,3)*E2(3,3)*E3(1,2)+2*E1(1,3)*E2(3,2)*E3(3,3)-2*E1(1,2)*E2(3,3)*E3(3,3)-2*E1(1,2)*E2(2,1)*E3(2,1)+2*E1(1,1)*E2(1,1)*E3(1,2)+2*E1(2,2)*E2(2,2)*E3(1,2)+2*E1(2,2)*E2(1,3)*E3(2,3)+2*E1(2,3)*E2(1,3)*E3(2,2)+2*E1(1,3)*E2(2,2)*E3(2,3)-2*E1(3,1)*E2(3,1)*E3(1,2)+2*E1(1,3)*E2(2,3)*E3(2,2)+2*E1(2,2)*E2(2,3)*E3(1,3)+2*E1(3,3)*E2(1,3)*E3(3,2)-2*E1(3,1)*E2(1,2)*E3(3,1)+2*E1(2,3)*E2(2,2)*E3(1,3)+2*E1(1,1)*E2(2,2)*E3(2,1)+2*E1(1,1)*E2(2,1)*E3(2,2)+2*E1(3,3)*E2(3,2)*E3(1,3)-2*E1(1,2)*E2(3,1)*E3(3,1)+2*E1(2,1)*E2(1,1)*E3(2,2)+2*E1(3,2)*E2(1,3)*E3(3,3)+2*E1(2,2)*E2(1,1)*E3(2,1)-2*E1(2,3)*E2(2,3)*E3(1,2)+2*E1(1,1)*E2(1,2)*E3(1,1)+2*E1(1,2)*E2(1,1)*E3(1,1)+2*E1(2,2)*E2(1,2)*E3(2,2)+2*E1(2,2)*E2(2,1)*E3(1,1)-2*E1(2,3)*E2(1,2)*E3(2,3)+2*E1(2,1)*E2(2,2)*E3(1,1)+2*E1(1,2)*E2(2,2)*E3(2,2)-2*E1(1,2)*E2(2,3)*E3(2,3)-2*E1(2,1)*E2(2,1)*E3(1,2)+2*E1(3,1)*E2(3,2)*E3(1,1)+2*E1(1,2)*E2(1,3)*E3(1,3)+6*E1(1,2)*E2(1,2)*E3(1,2)-2*E1(2,1)*E2(1,2)*E3(2,1)+2*E1(1,1)*E2(3,1)*E3(3,2)+2*E1(3,2)*E2(3,3)*E3(1,3)+2*E1(1,3)*E2(1,2)*E3(1,3)+2*E1(1,3)*E2(1,3)*E3(1,2)+2*E1(1,3)*E2(3,3)*E3(3,2)+2*E1(1,1)*E2(3,2)*E3(3,1)+2*E1(3,2)*E2(1,1)*E3(3,1)+2*E1(3,1)*E2(1,1)*E3(3,2)+2*E1(1,2)*E2(3,2)*E3(3,2);
Mcoefs(56) = -2*E1(1,3)*E2(1,3)*E3(2,2)-2*E1(1,3)*E2(2,2)*E3(1,3)-2*E1(2,2)*E2(1,3)*E3(1,3)+2*E1(3,1)*E2(3,2)*E3(2,1)-2*E1(1,1)*E2(1,1)*E3(2,2)-2*E1(2,2)*E2(3,3)*E3(3,3)+2*E1(1,2)*E2(2,3)*E3(1,3)+2*E1(1,1)*E2(1,2)*E3(2,1)+2*E1(1,1)*E2(2,1)*E3(1,2)+2*E1(2,3)*E2(1,3)*E3(1,2)-2*E1(3,1)*E2(2,2)*E3(3,1)-2*E1(3,1)*E2(3,1)*E3(2,2)+2*E1(1,2)*E2(1,1)*E3(2,1)+2*E1(2,3)*E2(3,2)*E3(3,3)+2*E1(1,3)*E2(2,3)*E3(1,2)+2*E1(2,3)*E2(3,3)*E3(3,2)+2*E1(1,3)*E2(1,2)*E3(2,3)+2*E1(2,1)*E2(2,2)*E3(2,1)+2*E1(1,2)*E2(2,2)*E3(1,2)+2*E1(3,2)*E2(3,2)*E3(2,2)+2*E1(2,1)*E2(2,1)*E3(2,2)+2*E1(3,2)*E2(2,2)*E3(3,2)+2*E1(2,2)*E2(3,2)*E3(3,2)-2*E1(3,3)*E2(2,2)*E3(3,3)+2*E1(3,2)*E2(3,1)*E3(2,1)+2*E1(3,1)*E2(2,1)*E3(3,2)+2*E1(2,2)*E2(2,3)*E3(2,3)+2*E1(2,1)*E2(1,2)*E3(1,1)+6*E1(2,2)*E2(2,2)*E3(2,2)+2*E1(2,1)*E2(3,2)*E3(3,1)+2*E1(2,3)*E2(2,2)*E3(2,3)+2*E1(2,3)*E2(2,3)*E3(2,2)+2*E1(3,2)*E2(2,1)*E3(3,1)+2*E1(2,2)*E2(2,1)*E3(2,1)+2*E1(1,2)*E2(1,3)*E3(2,3)-2*E1(3,3)*E2(3,3)*E3(2,2)+2*E1(2,1)*E2(3,1)*E3(3,2)+2*E1(3,2)*E2(3,3)*E3(2,3)+2*E1(2,2)*E2(1,2)*E3(1,2)+2*E1(1,2)*E2(1,2)*E3(2,2)-2*E1(2,2)*E2(3,1)*E3(3,1)+2*E1(3,3)*E2(2,3)*E3(3,2)+2*E1(1,2)*E2(2,1)*E3(1,1)+2*E1(3,2)*E2(2,3)*E3(3,3)+2*E1(3,3)*E2(3,2)*E3(2,3)+2*E1(2,3)*E2(1,2)*E3(1,3)+2*E1(2,1)*E2(1,1)*E3(1,2)-2*E1(1,1)*E2(2,2)*E3(1,1)-2*E1(2,2)*E2(1,1)*E3(1,1);
Mcoefs(57) = 2*E1(1,3)*E2(3,3)*E3(1,2)+2*E1(1,2)*E2(1,2)*E3(3,2)+2*E1(1,1)*E2(3,1)*E3(1,2)+2*E1(3,3)*E2(1,3)*E3(1,2)-2*E1(1,1)*E2(1,1)*E3(3,2)-2*E1(1,1)*E2(3,2)*E3(1,1)+2*E1(3,1)*E2(3,1)*E3(3,2)+2*E1(3,2)*E2(1,2)*E3(1,2)-2*E1(1,3)*E2(3,2)*E3(1,3)+2*E1(3,2)*E2(3,1)*E3(3,1)+2*E1(3,1)*E2(1,2)*E3(1,1)+2*E1(1,2)*E2(1,1)*E3(3,1)+2*E1(3,1)*E2(3,2)*E3(3,1)+2*E1(2,1)*E2(3,1)*E3(2,2)+2*E1(2,1)*E2(2,2)*E3(3,1)+2*E1(3,1)*E2(2,1)*E3(2,2)+2*E1(1,3)*E2(1,2)*E3(3,3)+2*E1(3,2)*E2(2,2)*E3(2,2)+2*E1(1,2)*E2(3,1)*E3(1,1)+2*E1(2,3)*E2(3,3)*E3(2,2)+2*E1(1,2)*E2(3,2)*E3(1,2)+2*E1(2,2)*E2(2,1)*E3(3,1)+2*E1(3,1)*E2(2,2)*E3(2,1)+2*E1(2,2)*E2(3,1)*E3(2,1)+2*E1(2,2)*E2(3,2)*E3(2,2)+2*E1(2,2)*E2(2,2)*E3(3,2)+6*E1(3,2)*E2(3,2)*E3(3,2)+2*E1(2,3)*E2(2,2)*E3(3,3)+2*E1(3,3)*E2(3,3)*E3(3,2)-2*E1(3,2)*E2(1,1)*E3(1,1)-2*E1(3,2)*E2(2,3)*E3(2,3)+2*E1(3,3)*E2(2,3)*E3(2,2)+2*E1(1,2)*E2(1,3)*E3(3,3)-2*E1(2,3)*E2(2,3)*E3(3,2)+2*E1(2,2)*E2(2,3)*E3(3,3)-2*E1(2,3)*E2(3,2)*E3(2,3)-2*E1(3,2)*E2(1,3)*E3(1,3)+2*E1(3,2)*E2(3,3)*E3(3,3)+2*E1(3,3)*E2(1,2)*E3(1,3)-2*E1(1,3)*E2(1,3)*E3(3,2)+2*E1(1,2)*E2(3,3)*E3(1,3)+2*E1(3,3)*E2(2,2)*E3(2,3)+2*E1(3,1)*E2(1,1)*E3(1,2)+2*E1(1,1)*E2(1,2)*E3(3,1)+2*E1(2,2)*E2(3,3)*E3(2,3)-2*E1(2,1)*E2(2,1)*E3(3,2)-2*E1(2,1)*E2(3,2)*E3(2,1)-2*E1(3,2)*E2(2,1)*E3(2,1)+2*E1(3,3)*E2(3,2)*E3(3,3);
Mcoefs(58) = 2*E1(1,3)*E2(2,3)*E3(2,3)-2*E1(1,3)*E2(3,2)*E3(3,2)+2*E1(3,3)*E2(3,2)*E3(1,2)+2*E1(1,3)*E2(3,3)*E3(3,3)-2*E1(3,1)*E2(1,3)*E3(3,1)+2*E1(1,2)*E2(2,3)*E3(2,2)-2*E1(1,3)*E2(2,2)*E3(2,2)+2*E1(1,2)*E2(1,2)*E3(1,3)+2*E1(1,2)*E2(1,3)*E3(1,2)+2*E1(1,3)*E2(1,2)*E3(1,2)+6*E1(1,3)*E2(1,3)*E3(1,3)+2*E1(1,1)*E2(2,3)*E3(2,1)-2*E1(2,1)*E2(2,1)*E3(1,3)-2*E1(3,2)*E2(1,3)*E3(3,2)-2*E1(1,3)*E2(2,1)*E3(2,1)+2*E1(2,1)*E2(1,1)*E3(2,3)+2*E1(1,1)*E2(3,1)*E3(3,3)-2*E1(2,1)*E2(1,3)*E3(2,1)+2*E1(1,2)*E2(3,2)*E3(3,3)+2*E1(2,3)*E2(1,2)*E3(2,2)+2*E1(2,2)*E2(1,2)*E3(2,3)+2*E1(2,2)*E2(2,3)*E3(1,2)+2*E1(3,1)*E2(3,3)*E3(1,1)+2*E1(1,1)*E2(3,3)*E3(3,1)+2*E1(3,2)*E2(1,2)*E3(3,3)+2*E1(3,3)*E2(3,3)*E3(1,3)+2*E1(1,2)*E2(3,3)*E3(3,2)+2*E1(2,3)*E2(1,1)*E3(2,1)-2*E1(3,1)*E2(3,1)*E3(1,3)+2*E1(3,3)*E2(1,1)*E3(3,1)+2*E1(2,3)*E2(2,1)*E3(1,1)+2*E1(1,2)*E2(2,2)*E3(2,3)+2*E1(2,1)*E2(2,3)*E3(1,1)-2*E1(3,2)*E2(3,2)*E3(1,3)+2*E1(2,3)*E2(1,3)*E3(2,3)+2*E1(2,3)*E2(2,3)*E3(1,3)+2*E1(3,1)*E2(1,1)*E3(3,3)+2*E1(3,3)*E2(1,2)*E3(3,2)+2*E1(3,2)*E2(3,3)*E3(1,2)-2*E1(1,3)*E2(3,1)*E3(3,1)+2*E1(2,3)*E2(2,2)*E3(1,2)+2*E1(3,3)*E2(1,3)*E3(3,3)+2*E1(3,3)*E2(3,1)*E3(1,1)-2*E1(2,2)*E2(2,2)*E3(1,3)-2*E1(2,2)*E2(1,3)*E3(2,2)+2*E1(1,1)*E2(2,1)*E3(2,3)+2*E1(1,1)*E2(1,3)*E3(1,1)+2*E1(1,3)*E2(1,1)*E3(1,1)+2*E1(1,1)*E2(1,1)*E3(1,3);
Mcoefs(59) = 2*E1(2,2)*E2(3,2)*E3(3,3)-2*E1(3,2)*E2(2,3)*E3(3,2)+2*E1(2,2)*E2(3,3)*E3(3,2)+2*E1(3,3)*E2(3,1)*E3(2,1)-2*E1(1,1)*E2(2,3)*E3(1,1)-2*E1(2,3)*E2(3,2)*E3(3,2)+2*E1(3,3)*E2(3,2)*E3(2,2)-2*E1(3,2)*E2(3,2)*E3(2,3)+2*E1(2,1)*E2(2,3)*E3(2,1)+2*E1(3,1)*E2(2,1)*E3(3,3)+2*E1(3,3)*E2(2,1)*E3(3,1)+2*E1(3,1)*E2(3,3)*E3(2,1)+2*E1(1,2)*E2(1,3)*E3(2,2)+2*E1(2,2)*E2(1,2)*E3(1,3)+2*E1(1,3)*E2(1,2)*E3(2,2)+2*E1(2,2)*E2(1,3)*E3(1,2)-2*E1(2,3)*E2(1,2)*E3(1,2)+2*E1(2,1)*E2(3,1)*E3(3,3)-2*E1(3,1)*E2(3,1)*E3(2,3)+2*E1(2,1)*E2(3,3)*E3(3,1)+2*E1(1,3)*E2(1,1)*E3(2,1)-2*E1(3,1)*E2(2,3)*E3(3,1)+2*E1(3,2)*E2(3,3)*E3(2,2)+2*E1(3,3)*E2(2,3)*E3(3,3)+2*E1(3,3)*E2(3,3)*E3(2,3)+2*E1(1,3)*E2(2,2)*E3(1,2)+2*E1(2,2)*E2(2,2)*E3(2,3)+2*E1(2,3)*E2(2,2)*E3(2,2)+2*E1(2,2)*E2(2,3)*E3(2,2)+2*E1(1,3)*E2(1,3)*E3(2,3)+2*E1(2,3)*E2(1,3)*E3(1,3)+2*E1(1,3)*E2(2,3)*E3(1,3)+2*E1(1,2)*E2(2,2)*E3(1,3)-2*E1(1,2)*E2(2,3)*E3(1,2)+2*E1(1,1)*E2(2,1)*E3(1,3)+2*E1(1,1)*E2(1,3)*E3(2,1)+2*E1(2,1)*E2(1,1)*E3(1,3)+2*E1(2,3)*E2(2,1)*E3(2,1)-2*E1(1,1)*E2(1,1)*E3(2,3)-2*E1(1,2)*E2(1,2)*E3(2,3)+6*E1(2,3)*E2(2,3)*E3(2,3)+2*E1(2,1)*E2(2,1)*E3(2,3)-2*E1(2,3)*E2(3,1)*E3(3,1)-2*E1(2,3)*E2(1,1)*E3(1,1)+2*E1(3,2)*E2(2,2)*E3(3,3)+2*E1(3,3)*E2(2,2)*E3(3,2)+2*E1(2,1)*E2(1,3)*E3(1,1)+2*E1(1,3)*E2(2,1)*E3(1,1)+2*E1(2,3)*E2(3,3)*E3(3,3);
Mcoefs(60) = -2*E1(2,2)*E2(2,2)*E3(3,3)+2*E1(3,1)*E2(1,3)*E3(1,1)+2*E1(1,3)*E2(1,1)*E3(3,1)-2*E1(2,1)*E2(2,1)*E3(3,3)+2*E1(3,3)*E2(2,3)*E3(2,3)+2*E1(3,2)*E2(2,3)*E3(2,2)-2*E1(3,3)*E2(1,1)*E3(1,1)-2*E1(1,2)*E2(1,2)*E3(3,3)-2*E1(1,1)*E2(1,1)*E3(3,3)-2*E1(1,1)*E2(3,3)*E3(1,1)+2*E1(3,2)*E2(1,2)*E3(1,3)+2*E1(3,2)*E2(1,3)*E3(1,2)+2*E1(1,3)*E2(3,2)*E3(1,2)+2*E1(3,1)*E2(2,1)*E3(2,3)+2*E1(2,3)*E2(2,1)*E3(3,1)+2*E1(1,2)*E2(3,2)*E3(1,3)+2*E1(3,1)*E2(2,3)*E3(2,1)+2*E1(2,3)*E2(3,1)*E3(2,1)-2*E1(2,1)*E2(3,3)*E3(2,1)+2*E1(3,3)*E2(3,2)*E3(3,2)+6*E1(3,3)*E2(3,3)*E3(3,3)-2*E1(1,2)*E2(3,3)*E3(1,2)-2*E1(3,3)*E2(2,1)*E3(2,1)+2*E1(1,3)*E2(3,1)*E3(1,1)+2*E1(3,2)*E2(3,3)*E3(3,2)+2*E1(1,3)*E2(1,2)*E3(3,2)+2*E1(2,2)*E2(3,2)*E3(2,3)+2*E1(2,2)*E2(2,3)*E3(3,2)+2*E1(3,2)*E2(2,2)*E3(2,3)+2*E1(2,3)*E2(2,2)*E3(3,2)+2*E1(2,3)*E2(3,2)*E3(2,2)+2*E1(2,3)*E2(2,3)*E3(3,3)-2*E1(3,3)*E2(1,2)*E3(1,2)+2*E1(2,3)*E2(3,3)*E3(2,3)+2*E1(2,1)*E2(3,1)*E3(2,3)+2*E1(2,1)*E2(2,3)*E3(3,1)+2*E1(1,1)*E2(1,3)*E3(3,1)-2*E1(3,3)*E2(2,2)*E3(2,2)+2*E1(3,1)*E2(1,1)*E3(1,3)+2*E1(1,3)*E2(1,3)*E3(3,3)+2*E1(3,3)*E2(1,3)*E3(1,3)+2*E1(1,3)*E2(3,3)*E3(1,3)+2*E1(1,2)*E2(1,3)*E3(3,2)+2*E1(1,1)*E2(3,1)*E3(1,3)+2*E1(3,2)*E2(3,2)*E3(3,3)+2*E1(3,1)*E2(3,1)*E3(3,3)+2*E1(3,1)*E2(3,3)*E3(3,1)+2*E1(3,3)*E2(3,1)*E3(3,1)-2*E1(2,2)*E2(3,3)*E3(2,2);
% Coefficients 61-70: terms quadratic in E2 and linear in E3.
% Mcoefs(61) is the signed sum over permutations (determinant-like mixed form in E2, E2, E3).
Mcoefs(61) = E2(1,1)*E2(2,2)*E3(3,3)+E2(3,1)*E2(1,2)*E3(2,3)+E2(1,1)*E2(3,3)*E3(2,2)+E2(2,1)*E2(3,2)*E3(1,3)+E2(2,2)*E2(3,3)*E3(1,1)+E2(3,1)*E2(2,3)*E3(1,2)+E2(1,2)*E2(2,3)*E3(3,1)+E2(3,2)*E2(1,3)*E3(2,1)-E2(1,1)*E2(2,3)*E3(3,2)-E2(1,1)*E2(3,2)*E3(2,3)-E2(3,2)*E2(2,3)*E3(1,1)-E2(2,1)*E2(1,2)*E3(3,3)-E2(2,1)*E2(3,3)*E3(1,2)-E2(1,2)*E2(3,3)*E3(2,1)+E2(2,1)*E2(1,3)*E3(3,2)-E2(3,1)*E2(1,3)*E3(2,2)-E2(3,1)*E2(2,2)*E3(1,3)-E2(2,2)*E2(1,3)*E3(3,1);
Mcoefs(62) = 2*E2(1,2)*E2(2,2)*E3(2,1)+2*E2(3,1)*E2(3,2)*E3(1,2)+2*E2(3,1)*E2(1,3)*E3(3,3)+2*E2(3,1)*E2(3,3)*E3(1,3)+2*E2(1,2)*E2(3,2)*E3(3,1)-2*E2(1,1)*E2(3,2)*E3(3,2)-2*E2(1,1)*E2(3,3)*E3(3,3)+2*E2(1,1)*E2(2,1)*E3(2,1)+2*E2(2,1)*E2(1,2)*E3(2,2)+2*E2(2,1)*E2(2,2)*E3(1,2)+2*E2(2,1)*E2(2,3)*E3(1,3)+2*E2(1,1)*E2(1,3)*E3(1,3)+2*E2(1,1)*E2(3,1)*E3(3,1)+2*E2(3,1)*E2(1,2)*E3(3,2)+2*E2(1,3)*E2(3,3)*E3(3,1)+2*E2(1,3)*E2(2,3)*E3(2,1)-2*E2(1,1)*E2(2,3)*E3(2,3)-2*E2(1,1)*E2(2,2)*E3(2,2)+2*E2(2,1)*E2(1,3)*E3(2,3)+2*E2(1,1)*E2(1,2)*E3(1,2)+E2(2,1)^2*E3(1,1)+E2(3,1)^2*E3(1,1)+3*E2(1,1)^2*E3(1,1)-E2(2,3)^2*E3(1,1)+E2(1,2)^2*E3(1,1)-E2(2,2)^2*E3(1,1)-E2(3,3)^2*E3(1,1)+E2(1,3)^2*E3(1,1)-E2(3,2)^2*E3(1,1);
Mcoefs(63) = E2(1,1)^2*E3(2,1)-E2(3,2)^2*E3(2,1)-E2(3,3)^2*E3(2,1)+E2(2,3)^2*E3(2,1)+3*E2(2,1)^2*E3(2,1)+E2(2,2)^2*E3(2,1)-E2(1,2)^2*E3(2,1)-E2(1,3)^2*E3(2,1)+E2(3,1)^2*E3(2,1)+2*E2(2,1)*E2(2,3)*E3(2,3)-2*E2(2,1)*E2(3,3)*E3(3,3)+2*E2(3,1)*E2(3,3)*E3(2,3)+2*E2(2,1)*E2(2,2)*E3(2,2)-2*E2(2,1)*E2(1,3)*E3(1,3)+2*E2(1,1)*E2(2,1)*E3(1,1)+2*E2(1,2)*E2(2,2)*E3(1,1)+2*E2(3,2)*E2(2,2)*E3(3,1)-2*E2(2,1)*E2(1,2)*E3(1,2)-2*E2(2,1)*E2(3,2)*E3(3,2)+2*E2(1,1)*E2(1,3)*E3(2,3)+2*E2(3,1)*E2(3,2)*E3(2,2)+2*E2(2,3)*E2(3,3)*E3(3,1)+2*E2(3,1)*E2(2,3)*E3(3,3)+2*E2(2,1)*E2(3,1)*E3(3,1)+2*E2(1,1)*E2(2,3)*E3(1,3)+2*E2(1,3)*E2(2,3)*E3(1,1)+2*E2(1,1)*E2(2,2)*E3(1,2)+2*E2(1,1)*E2(1,2)*E3(2,2)+2*E2(3,1)*E2(2,2)*E3(3,2);
Mcoefs(64) = E2(3,2)^2*E3(3,1)+E2(2,1)^2*E3(3,1)-E2(1,2)^2*E3(3,1)-E2(1,3)^2*E3(3,1)+E2(3,3)^2*E3(3,1)+E2(1,1)^2*E3(3,1)+3*E2(3,1)^2*E3(3,1)-E2(2,3)^2*E3(3,1)-E2(2,2)^2*E3(3,1)+2*E2(1,1)*E2(1,3)*E3(3,3)+2*E2(1,1)*E2(3,2)*E3(1,2)+2*E2(2,1)*E2(2,2)*E3(3,2)+2*E2(1,1)*E2(1,2)*E3(3,2)+2*E2(3,2)*E2(2,2)*E3(2,1)+2*E2(1,2)*E2(3,2)*E3(1,1)+2*E2(1,1)*E2(3,3)*E3(1,3)+2*E2(1,1)*E2(3,1)*E3(1,1)+2*E2(2,1)*E2(3,1)*E3(2,1)-2*E2(3,1)*E2(1,2)*E3(1,2)+2*E2(3,1)*E2(3,3)*E3(3,3)+2*E2(2,3)*E2(3,3)*E3(2,1)-2*E2(3,1)*E2(1,3)*E3(1,3)+2*E2(1,3)*E2(3,3)*E3(1,1)+2*E2(2,1)*E2(3,3)*E3(2,3)+2*E2(3,1)*E2(3,2)*E3(3,2)-2*E2(3,1)*E2(2,3)*E3(2,3)+2*E2(2,1)*E2(2,3)*E3(3,3)-2*E2(3,1)*E2(2,2)*E3(2,2)+2*E2(2,1)*E2(3,2)*E3(2,2);
Mcoefs(65) = 3*E2(1,2)^2*E3(1,2)-E2(2,1)^2*E3(1,2)-E2(2,3)^2*E3(1,2)+E2(1,3)^2*E3(1,2)+E2(1,1)^2*E3(1,2)+E2(3,2)^2*E3(1,2)-E2(3,1)^2*E3(1,2)+E2(2,2)^2*E3(1,2)+2*E2(1,1)*E2(1,2)*E3(1,1)+2*E2(1,2)*E2(2,2)*E3(2,2)+2*E2(1,1)*E2(2,2)*E3(2,1)+2*E2(2,1)*E2(2,2)*E3(1,1)+2*E2(1,1)*E2(3,2)*E3(3,1)-E2(3,3)^2*E3(1,2)+2*E2(1,1)*E2(2,1)*E3(2,2)+2*E2(1,2)*E2(3,2)*E3(3,2)+2*E2(1,3)*E2(2,3)*E3(2,2)+2*E2(3,2)*E2(3,3)*E3(1,3)+2*E2(1,2)*E2(1,3)*E3(1,3)+2*E2(1,1)*E2(3,1)*E3(3,2)+2*E2(3,2)*E2(1,3)*E3(3,3)+2*E2(2,2)*E2(2,3)*E3(1,3)+2*E2(1,3)*E2(3,3)*E3(3,2)-2*E2(1,2)*E2(3,3)*E3(3,3)-2*E2(2,1)*E2(1,2)*E3(2,1)-2*E2(1,2)*E2(2,3)*E3(2,3)-2*E2(3,1)*E2(1,2)*E3(3,1)+2*E2(2,2)*E2(1,3)*E3(2,3)+2*E2(3,1)*E2(3,2)*E3(1,1);
Mcoefs(66) = 2*E2(1,2)*E2(2,3)*E3(1,3)-2*E2(2,2)*E2(3,3)*E3(3,3)+2*E2(3,2)*E2(2,3)*E3(3,3)+E2(2,3)^2*E3(2,2)-E2(3,1)^2*E3(2,2)-E2(1,3)^2*E3(2,2)-E2(1,1)^2*E3(2,2)+E2(2,1)^2*E3(2,2)+2*E2(3,2)*E2(3,3)*E3(2,3)+2*E2(2,1)*E2(2,2)*E3(2,1)+2*E2(1,1)*E2(1,2)*E3(2,1)+2*E2(2,3)*E2(3,3)*E3(3,2)-2*E2(3,1)*E2(2,2)*E3(3,1)+2*E2(2,1)*E2(3,2)*E3(3,1)+2*E2(3,1)*E2(3,2)*E3(2,1)+2*E2(1,2)*E2(2,2)*E3(1,2)+2*E2(1,3)*E2(2,3)*E3(1,2)-2*E2(2,2)*E2(1,3)*E3(1,3)+E2(3,2)^2*E3(2,2)-E2(3,3)^2*E3(2,2)+E2(1,2)^2*E3(2,2)+2*E2(3,2)*E2(2,2)*E3(3,2)-2*E2(1,1)*E2(2,2)*E3(1,1)+3*E2(2,2)^2*E3(2,2)+2*E2(2,2)*E2(2,3)*E3(2,3)+2*E2(1,2)*E2(1,3)*E3(2,3)+2*E2(2,1)*E2(1,2)*E3(1,1)+2*E2(2,1)*E2(3,1)*E3(3,2)+2*E2(1,1)*E2(2,1)*E3(1,2);
Mcoefs(67) = 2*E2(2,2)*E2(2,3)*E3(3,3)+2*E2(2,2)*E2(3,3)*E3(2,3)+2*E2(1,3)*E2(3,3)*E3(1,2)+2*E2(3,1)*E2(1,2)*E3(1,1)-2*E2(3,2)*E2(2,3)*E3(2,3)-E2(2,3)^2*E3(3,2)+2*E2(3,1)*E2(3,2)*E3(3,1)-2*E2(3,2)*E2(1,3)*E3(1,3)-2*E2(2,1)*E2(3,2)*E3(2,1)+2*E2(1,1)*E2(1,2)*E3(3,1)+2*E2(1,2)*E2(3,3)*E3(1,3)-2*E2(1,1)*E2(3,2)*E3(1,1)+2*E2(3,2)*E2(3,3)*E3(3,3)+2*E2(1,1)*E2(3,1)*E3(1,2)+2*E2(3,1)*E2(2,2)*E3(2,1)+2*E2(2,1)*E2(2,2)*E3(3,1)+2*E2(2,3)*E2(3,3)*E3(2,2)+2*E2(1,2)*E2(1,3)*E3(3,3)+2*E2(2,1)*E2(3,1)*E3(2,2)+2*E2(3,2)*E2(2,2)*E3(2,2)+2*E2(1,2)*E2(3,2)*E3(1,2)+3*E2(3,2)^2*E3(3,2)+E2(3,3)^2*E3(3,2)+E2(1,2)^2*E3(3,2)-E2(2,1)^2*E3(3,2)-E2(1,3)^2*E3(3,2)-E2(1,1)^2*E3(3,2)+E2(3,1)^2*E3(3,2)+E2(2,2)^2*E3(3,2);
Mcoefs(68) = 3*E2(1,3)^2*E3(1,3)+E2(2,3)^2*E3(1,3)-E2(2,1)^2*E3(1,3)+E2(1,1)^2*E3(1,3)-E2(3,2)^2*E3(1,3)+E2(3,3)^2*E3(1,3)-E2(2,2)^2*E3(1,3)-E2(3,1)^2*E3(1,3)+2*E2(1,2)*E2(3,2)*E3(3,3)+2*E2(1,1)*E2(1,3)*E3(1,1)+2*E2(3,1)*E2(3,3)*E3(1,1)+2*E2(1,2)*E2(3,3)*E3(3,2)+2*E2(3,2)*E2(3,3)*E3(1,2)+E2(1,2)^2*E3(1,3)+2*E2(1,3)*E2(2,3)*E3(2,3)+2*E2(1,1)*E2(3,1)*E3(3,3)+2*E2(1,3)*E2(3,3)*E3(3,3)-2*E2(3,1)*E2(1,3)*E3(3,1)+2*E2(1,2)*E2(1,3)*E3(1,2)+2*E2(1,1)*E2(2,1)*E3(2,3)+2*E2(1,2)*E2(2,3)*E3(2,2)+2*E2(2,2)*E2(2,3)*E3(1,2)-2*E2(3,2)*E2(1,3)*E3(3,2)-2*E2(2,1)*E2(1,3)*E3(2,1)+2*E2(1,1)*E2(2,3)*E3(2,1)-2*E2(2,2)*E2(1,3)*E3(2,2)+2*E2(1,1)*E2(3,3)*E3(3,1)+2*E2(2,1)*E2(2,3)*E3(1,1)+2*E2(1,2)*E2(2,2)*E3(2,3);
Mcoefs(69) = 2*E2(1,3)*E2(2,3)*E3(1,3)+2*E2(3,2)*E2(2,2)*E3(3,3)-2*E2(3,1)*E2(2,3)*E3(3,1)-2*E2(3,2)*E2(2,3)*E3(3,2)+2*E2(2,1)*E2(3,3)*E3(3,1)+2*E2(3,1)*E2(3,3)*E3(2,1)+2*E2(2,3)*E2(3,3)*E3(3,3)+2*E2(1,1)*E2(2,1)*E3(1,3)-E2(1,2)^2*E3(2,3)+2*E2(1,2)*E2(1,3)*E3(2,2)+2*E2(2,2)*E2(1,3)*E3(1,2)+2*E2(1,1)*E2(1,3)*E3(2,1)+2*E2(1,2)*E2(2,2)*E3(1,3)+2*E2(2,1)*E2(1,3)*E3(1,1)+2*E2(2,2)*E2(3,3)*E3(3,2)+2*E2(3,2)*E2(3,3)*E3(2,2)+2*E2(2,1)*E2(3,1)*E3(3,3)+2*E2(2,1)*E2(2,3)*E3(2,1)+2*E2(2,2)*E2(2,3)*E3(2,2)-2*E2(1,1)*E2(2,3)*E3(1,1)-2*E2(1,2)*E2(2,3)*E3(1,2)+E2(2,1)^2*E3(2,3)+E2(3,3)^2*E3(2,3)+E2(2,2)^2*E3(2,3)-E2(3,1)^2*E3(2,3)-E2(3,2)^2*E3(2,3)-E2(1,1)^2*E3(2,3)+E2(1,3)^2*E3(2,3)+3*E2(2,3)^2*E3(2,3);
Mcoefs(70) = E2(1,3)^2*E3(3,3)-E2(2,1)^2*E3(3,3)+E2(3,1)^2*E3(3,3)+E2(2,3)^2*E3(3,3)-E2(1,1)^2*E3(3,3)-E2(2,2)^2*E3(3,3)+3*E2(3,3)^2*E3(3,3)+E2(3,2)^2*E3(3,3)-E2(1,2)^2*E3(3,3)+2*E2(2,1)*E2(2,3)*E3(3,1)+2*E2(3,1)*E2(2,3)*E3(2,1)+2*E2(3,2)*E2(2,2)*E3(2,3)+2*E2(1,3)*E2(3,3)*E3(1,3)+2*E2(2,2)*E2(2,3)*E3(3,2)+2*E2(3,2)*E2(2,3)*E3(2,2)+2*E2(2,3)*E2(3,3)*E3(2,3)+2*E2(1,1)*E2(3,1)*E3(1,3)+2*E2(1,2)*E2(3,2)*E3(1,3)-2*E2(1,1)*E2(3,3)*E3(1,1)-2*E2(1,2)*E2(3,3)*E3(1,2)+2*E2(2,1)*E2(3,1)*E3(2,3)+2*E2(3,2)*E2(3,3)*E3(3,2)+2*E2(3,1)*E2(1,3)*E3(1,1)-2*E2(2,1)*E2(3,3)*E3(2,1)-2*E2(2,2)*E2(3,3)*E3(2,2)+2*E2(3,2)*E2(1,3)*E3(1,2)+2*E2(1,2)*E2(1,3)*E3(3,2)+2*E2(3,1)*E2(3,3)*E3(3,1)+2*E2(1,1)*E2(1,3)*E3(3,1);
% Coefficients 71-80: terms linear in E1 and quadratic in E3.
Mcoefs(71) = E1(2,1)*E3(3,2)*E3(1,3)+E1(1,1)*E3(2,2)*E3(3,3)+E1(1,3)*E3(2,1)*E3(3,2)+E1(3,2)*E3(2,1)*E3(1,3)-E1(1,1)*E3(3,2)*E3(2,3)-E1(2,3)*E3(1,1)*E3(3,2)-E1(3,2)*E3(1,1)*E3(2,3)-E1(2,1)*E3(1,2)*E3(3,3)-E1(1,2)*E3(2,1)*E3(3,3)+E1(1,2)*E3(3,1)*E3(2,3)+E1(3,1)*E3(1,2)*E3(2,3)-E1(3,3)*E3(2,1)*E3(1,2)-E1(3,1)*E3(2,2)*E3(1,3)-E1(1,3)*E3(3,1)*E3(2,2)-E1(2,2)*E3(3,1)*E3(1,3)+E1(2,2)*E3(1,1)*E3(3,3)+E1(3,3)*E3(1,1)*E3(2,2)+E1(2,3)*E3(3,1)*E3(1,2);
Mcoefs(72) = 3*E1(1,1)*E3(1,1)^2+E1(1,1)*E3(1,2)^2-E1(1,1)*E3(2,2)^2+E1(1,1)*E3(1,3)^2+E1(1,1)*E3(2,1)^2+E1(1,1)*E3(3,1)^2-E1(1,1)*E3(2,3)^2-E1(1,1)*E3(3,2)^2-E1(1,1)*E3(3,3)^2+2*E1(1,2)*E3(1,1)*E3(1,2)-2*E1(2,2)*E3(1,1)*E3(2,2)+2*E1(1,2)*E3(2,1)*E3(2,2)+2*E1(2,1)*E3(1,3)*E3(2,3)+2*E1(2,3)*E3(2,1)*E3(1,3)-2*E1(2,3)*E3(1,1)*E3(2,3)+2*E1(1,3)*E3(2,1)*E3(2,3)+2*E1(3,2)*E3(3,1)*E3(1,2)+2*E1(3,1)*E3(1,1)*E3(3,1)+2*E1(1,2)*E3(3,1)*E3(3,2)+2*E1(1,3)*E3(1,1)*E3(1,3)+2*E1(1,3)*E3(3,1)*E3(3,3)+2*E1(2,2)*E3(2,1)*E3(1,2)+2*E1(2,1)*E3(1,1)*E3(2,1)-2*E1(3,2)*E3(1,1)*E3(3,2)+2*E1(3,1)*E3(1,2)*E3(3,2)-2*E1(3,3)*E3(1,1)*E3(3,3)+2*E1(3,3)*E3(3,1)*E3(1,3)+2*E1(2,1)*E3(1,2)*E3(2,2)+2*E1(3,1)*E3(1,3)*E3(3,3);
Mcoefs(73) = -2*E1(3,3)*E3(2,1)*E3(3,3)+E1(2,1)*E3(2,3)^2+E1(2,1)*E3(3,1)^2+E1(2,1)*E3(1,1)^2+E1(2,1)*E3(2,2)^2-E1(2,1)*E3(3,2)^2-E1(2,1)*E3(3,3)^2-E1(2,1)*E3(1,2)^2-E1(2,1)*E3(1,3)^2+2*E1(2,2)*E3(2,1)*E3(2,2)+2*E1(2,3)*E3(2,1)*E3(2,3)+3*E1(2,1)*E3(2,1)^2-2*E1(1,3)*E3(2,1)*E3(1,3)+2*E1(3,1)*E3(3,2)*E3(2,2)+2*E1(1,1)*E3(1,2)*E3(2,2)+2*E1(1,1)*E3(1,1)*E3(2,1)+2*E1(3,1)*E3(2,1)*E3(3,1)+2*E1(1,2)*E3(1,1)*E3(2,2)-2*E1(3,2)*E3(2,1)*E3(3,2)+2*E1(1,1)*E3(1,3)*E3(2,3)+2*E1(3,1)*E3(2,3)*E3(3,3)+2*E1(3,3)*E3(3,1)*E3(2,3)+2*E1(2,3)*E3(3,1)*E3(3,3)-2*E1(1,2)*E3(2,1)*E3(1,2)+2*E1(3,2)*E3(3,1)*E3(2,2)+2*E1(2,2)*E3(3,1)*E3(3,2)+2*E1(2,2)*E3(1,1)*E3(1,2)+2*E1(2,3)*E3(1,1)*E3(1,3)+2*E1(1,3)*E3(1,1)*E3(2,3);
Mcoefs(74) = E1(3,1)*E3(1,1)^2+E1(3,1)*E3(3,2)^2-E1(3,1)*E3(1,2)^2+E1(3,1)*E3(2,1)^2+3*E1(3,1)*E3(3,1)^2+E1(3,1)*E3(3,3)^2-E1(3,1)*E3(2,3)^2-2*E1(2,2)*E3(3,1)*E3(2,2)+2*E1(1,2)*E3(1,1)*E3(3,2)+2*E1(1,3)*E3(1,1)*E3(3,3)-2*E1(1,2)*E3(3,1)*E3(1,2)+2*E1(3,2)*E3(2,1)*E3(2,2)-2*E1(1,3)*E3(3,1)*E3(1,3)+2*E1(3,3)*E3(3,1)*E3(3,3)+2*E1(1,1)*E3(1,2)*E3(3,2)+2*E1(2,3)*E3(2,1)*E3(3,3)-2*E1(2,3)*E3(3,1)*E3(2,3)+2*E1(3,2)*E3(3,1)*E3(3,2)+2*E1(3,3)*E3(2,1)*E3(2,3)+2*E1(3,2)*E3(1,1)*E3(1,2)+2*E1(1,1)*E3(1,1)*E3(3,1)+2*E1(1,1)*E3(1,3)*E3(3,3)+2*E1(2,1)*E3(2,1)*E3(3,1)-E1(3,1)*E3(2,2)^2-E1(3,1)*E3(1,3)^2+2*E1(2,1)*E3(3,2)*E3(2,2)+2*E1(3,3)*E3(1,1)*E3(1,3)+2*E1(2,1)*E3(2,3)*E3(3,3)+2*E1(2,2)*E3(2,1)*E3(3,2);
Mcoefs(75) = -2*E1(3,1)*E3(3,1)*E3(1,2)+2*E1(3,2)*E3(1,1)*E3(3,1)+2*E1(2,1)*E3(1,1)*E3(2,2)-2*E1(2,3)*E3(1,2)*E3(2,3)+2*E1(1,3)*E3(1,2)*E3(1,3)-2*E1(2,1)*E3(2,1)*E3(1,2)+2*E1(2,2)*E3(1,2)*E3(2,2)+2*E1(1,3)*E3(2,2)*E3(2,3)+2*E1(2,3)*E3(2,2)*E3(1,3)+2*E1(3,2)*E3(1,2)*E3(3,2)+2*E1(1,1)*E3(2,1)*E3(2,2)+2*E1(1,3)*E3(3,2)*E3(3,3)-2*E1(3,3)*E3(1,2)*E3(3,3)-E1(1,2)*E3(3,1)^2+2*E1(2,2)*E3(1,3)*E3(2,3)+2*E1(2,2)*E3(1,1)*E3(2,1)+2*E1(3,1)*E3(1,1)*E3(3,2)+2*E1(1,1)*E3(1,1)*E3(1,2)+2*E1(1,1)*E3(3,1)*E3(3,2)+E1(1,2)*E3(1,1)^2+E1(1,2)*E3(2,2)^2-E1(1,2)*E3(2,3)^2+3*E1(1,2)*E3(1,2)^2-E1(1,2)*E3(2,1)^2+E1(1,2)*E3(1,3)^2-E1(1,2)*E3(3,3)^2+2*E1(3,3)*E3(3,2)*E3(1,3)+2*E1(3,2)*E3(1,3)*E3(3,3)+E1(1,2)*E3(3,2)^2;
Mcoefs(76) = 2*E1(1,2)*E3(1,2)*E3(2,2)+2*E1(3,2)*E3(3,2)*E3(2,2)+2*E1(2,3)*E3(3,2)*E3(3,3)-2*E1(1,3)*E3(2,2)*E3(1,3)+2*E1(3,2)*E3(2,3)*E3(3,3)+2*E1(2,1)*E3(3,1)*E3(3,2)+2*E1(1,2)*E3(1,1)*E3(2,1)+2*E1(2,1)*E3(1,1)*E3(1,2)+2*E1(3,2)*E3(2,1)*E3(3,1)+2*E1(2,1)*E3(2,1)*E3(2,2)+2*E1(2,3)*E3(1,2)*E3(1,3)+2*E1(2,3)*E3(2,2)*E3(2,3)+2*E1(1,1)*E3(2,1)*E3(1,2)+2*E1(1,3)*E3(1,2)*E3(2,3)+2*E1(3,1)*E3(2,1)*E3(3,2)+2*E1(1,2)*E3(1,3)*E3(2,3)-2*E1(3,3)*E3(2,2)*E3(3,3)+E1(2,2)*E3(2,3)^2+E1(2,2)*E3(3,2)^2+2*E1(3,3)*E3(3,2)*E3(2,3)-2*E1(1,1)*E3(1,1)*E3(2,2)-2*E1(3,1)*E3(3,1)*E3(2,2)-E1(2,2)*E3(3,1)^2+E1(2,2)*E3(2,1)^2+3*E1(2,2)*E3(2,2)^2+E1(2,2)*E3(1,2)^2-E1(2,2)*E3(1,3)^2-E1(2,2)*E3(3,3)^2-E1(2,2)*E3(1,1)^2;
Mcoefs(77) = 3*E1(3,2)*E3(3,2)^2-E1(3,2)*E3(1,1)^2-E1(3,2)*E3(1,3)^2+E1(3,2)*E3(2,2)^2+E1(3,2)*E3(1,2)^2-E1(3,2)*E3(2,1)^2+E1(3,2)*E3(3,3)^2-E1(3,2)*E3(2,3)^2+E1(3,2)*E3(3,1)^2-2*E1(2,3)*E3(3,2)*E3(2,3)+2*E1(1,2)*E3(1,1)*E3(3,1)+2*E1(1,1)*E3(3,1)*E3(1,2)-2*E1(1,1)*E3(1,1)*E3(3,2)+2*E1(3,3)*E3(1,2)*E3(1,3)+2*E1(3,1)*E3(1,1)*E3(1,2)+2*E1(1,2)*E3(1,3)*E3(3,3)-2*E1(1,3)*E3(3,2)*E3(1,3)+2*E1(3,1)*E3(3,1)*E3(3,2)+2*E1(1,3)*E3(1,2)*E3(3,3)+2*E1(3,3)*E3(3,2)*E3(3,3)-2*E1(2,1)*E3(2,1)*E3(3,2)+2*E1(2,2)*E3(2,1)*E3(3,1)+2*E1(2,1)*E3(3,1)*E3(2,2)+2*E1(3,1)*E3(2,1)*E3(2,2)+2*E1(2,3)*E3(2,2)*E3(3,3)+2*E1(1,2)*E3(1,2)*E3(3,2)+2*E1(2,2)*E3(2,3)*E3(3,3)+2*E1(2,2)*E3(3,2)*E3(2,2)+2*E1(3,3)*E3(2,2)*E3(2,3);
Mcoefs(78) = 2*E1(2,3)*E3(1,2)*E3(2,2)+2*E1(1,2)*E3(1,2)*E3(1,3)+2*E1(1,1)*E3(2,1)*E3(2,3)-2*E1(2,1)*E3(2,1)*E3(1,3)+2*E1(1,1)*E3(3,1)*E3(3,3)+2*E1(1,2)*E3(3,2)*E3(3,3)+2*E1(3,1)*E3(1,1)*E3(3,3)-2*E1(3,1)*E3(3,1)*E3(1,3)+2*E1(3,3)*E3(1,1)*E3(3,1)+2*E1(1,2)*E3(2,2)*E3(2,3)+2*E1(3,2)*E3(1,2)*E3(3,3)+2*E1(3,3)*E3(1,2)*E3(3,2)+2*E1(2,3)*E3(1,3)*E3(2,3)+2*E1(3,3)*E3(1,3)*E3(3,3)+2*E1(2,1)*E3(1,1)*E3(2,3)-2*E1(3,2)*E3(3,2)*E3(1,3)+2*E1(1,1)*E3(1,1)*E3(1,3)-2*E1(2,2)*E3(2,2)*E3(1,3)+2*E1(2,2)*E3(1,2)*E3(2,3)+2*E1(2,3)*E3(1,1)*E3(2,1)-E1(1,3)*E3(3,1)^2-E1(1,3)*E3(2,1)^2-E1(1,3)*E3(3,2)^2+E1(1,3)*E3(1,2)^2+E1(1,3)*E3(2,3)^2+E1(1,3)*E3(3,3)^2+E1(1,3)*E3(1,1)^2+3*E1(1,3)*E3(1,3)^2-E1(1,3)*E3(2,2)^2;
Mcoefs(79) = E1(2,3)*E3(3,3)^2+E1(2,3)*E3(1,3)^2+E1(2,3)*E3(2,1)^2-E1(2,3)*E3(1,1)^2+3*E1(2,3)*E3(2,3)^2-E1(2,3)*E3(3,2)^2-E1(2,3)*E3(3,1)^2-E1(2,3)*E3(1,2)^2+E1(2,3)*E3(2,2)^2+2*E1(3,1)*E3(2,1)*E3(3,3)+2*E1(3,3)*E3(3,2)*E3(2,2)+2*E1(1,1)*E3(2,1)*E3(1,3)-2*E1(1,2)*E3(1,2)*E3(2,3)+2*E1(1,3)*E3(1,2)*E3(2,2)+2*E1(3,3)*E3(2,3)*E3(3,3)+2*E1(2,2)*E3(3,2)*E3(3,3)+2*E1(2,1)*E3(1,1)*E3(1,3)+2*E1(3,2)*E3(2,2)*E3(3,3)-2*E1(1,1)*E3(1,1)*E3(2,3)+2*E1(1,3)*E3(1,3)*E3(2,3)+2*E1(2,1)*E3(3,1)*E3(3,3)-2*E1(3,2)*E3(3,2)*E3(2,3)-2*E1(3,1)*E3(3,1)*E3(2,3)+2*E1(2,2)*E3(2,2)*E3(2,3)+2*E1(3,3)*E3(2,1)*E3(3,1)+2*E1(1,2)*E3(2,2)*E3(1,3)+2*E1(1,3)*E3(1,1)*E3(2,1)+2*E1(2,1)*E3(2,1)*E3(2,3)+2*E1(2,2)*E3(1,2)*E3(1,3);
Mcoefs(80) = 2*E1(3,1)*E3(3,1)*E3(3,3)-2*E1(1,1)*E3(1,1)*E3(3,3)+2*E1(2,3)*E3(2,3)*E3(3,3)-2*E1(1,2)*E3(1,2)*E3(3,3)+2*E1(1,3)*E3(1,3)*E3(3,3)+E1(3,3)*E3(2,3)^2-2*E1(2,2)*E3(2,2)*E3(3,3)+2*E1(1,1)*E3(3,1)*E3(1,3)+2*E1(3,1)*E3(1,1)*E3(1,3)-2*E1(2,1)*E3(2,1)*E3(3,3)+2*E1(1,2)*E3(3,2)*E3(1,3)+2*E1(3,2)*E3(1,2)*E3(1,3)+2*E1(1,3)*E3(1,1)*E3(3,1)+2*E1(2,1)*E3(3,1)*E3(2,3)+2*E1(1,3)*E3(1,2)*E3(3,2)+2*E1(3,2)*E3(3,2)*E3(3,3)+2*E1(3,2)*E3(2,2)*E3(2,3)+2*E1(2,3)*E3(3,2)*E3(2,2)+2*E1(2,2)*E3(3,2)*E3(2,3)+2*E1(2,3)*E3(2,1)*E3(3,1)+2*E1(3,1)*E3(2,1)*E3(2,3)+3*E1(3,3)*E3(3,3)^2+E1(3,3)*E3(3,2)^2+E1(3,3)*E3(1,3)^2-E1(3,3)*E3(2,1)^2-E1(3,3)*E3(2,2)^2-E1(3,3)*E3(1,1)^2-E1(3,3)*E3(1,2)^2+E1(3,3)*E3(3,1)^2;
% Coefficients 81-90: terms linear in E2 and quadratic in E3.
Mcoefs(81) = E2(1,3)*E3(2,1)*E3(3,2)+E2(3,2)*E3(2,1)*E3(1,3)+E2(1,1)*E3(2,2)*E3(3,3)-E2(1,1)*E3(3,2)*E3(2,3)-E2(2,3)*E3(1,1)*E3(3,2)+E2(3,1)*E3(1,2)*E3(2,3)+E2(2,1)*E3(3,2)*E3(1,3)+E2(1,2)*E3(3,1)*E3(2,3)+E2(2,3)*E3(3,1)*E3(1,2)+E2(2,2)*E3(1,1)*E3(3,3)+E2(3,3)*E3(1,1)*E3(2,2)-E2(3,2)*E3(1,1)*E3(2,3)-E2(2,1)*E3(1,2)*E3(3,3)-E2(1,2)*E3(2,1)*E3(3,3)-E2(3,3)*E3(2,1)*E3(1,2)-E2(3,1)*E3(2,2)*E3(1,3)-E2(1,3)*E3(3,1)*E3(2,2)-E2(2,2)*E3(3,1)*E3(1,3);
Mcoefs(82) = E2(1,1)*E3(3,1)^2-E2(1,1)*E3(2,2)^2-E2(1,1)*E3(3,2)^2+2*E2(1,3)*E3(3,1)*E3(3,3)+2*E2(3,1)*E3(1,3)*E3(3,3)+2*E2(1,2)*E3(2,1)*E3(2,2)+2*E2(1,2)*E3(1,1)*E3(1,2)-2*E2(2,3)*E3(1,1)*E3(2,3)+2*E2(3,3)*E3(3,1)*E3(1,3)+2*E2(1,3)*E3(1,1)*E3(1,3)+3*E2(1,1)*E3(1,1)^2+E2(1,1)*E3(2,1)^2-E2(1,1)*E3(2,3)^2-E2(1,1)*E3(3,3)^2+E2(1,1)*E3(1,3)^2+E2(1,1)*E3(1,2)^2+2*E2(1,3)*E3(2,1)*E3(2,3)+2*E2(2,1)*E3(1,1)*E3(2,1)-2*E2(3,3)*E3(1,1)*E3(3,3)-2*E2(3,2)*E3(1,1)*E3(3,2)+2*E2(3,1)*E3(1,2)*E3(3,2)+2*E2(3,2)*E3(3,1)*E3(1,2)+2*E2(2,2)*E3(2,1)*E3(1,2)+2*E2(2,1)*E3(1,3)*E3(2,3)+2*E2(2,1)*E3(1,2)*E3(2,2)-2*E2(2,2)*E3(1,1)*E3(2,2)+2*E2(2,3)*E3(2,1)*E3(1,3)+2*E2(3,1)*E3(1,1)*E3(3,1)+2*E2(1,2)*E3(3,1)*E3(3,2);
Mcoefs(83) = E2(2,1)*E3(2,2)^2+3*E2(2,1)*E3(2,1)^2-E2(2,1)*E3(1,2)^2-E2(2,1)*E3(1,3)^2-E2(2,1)*E3(3,2)^2+E2(2,1)*E3(3,1)^2+E2(2,1)*E3(2,3)^2+E2(2,1)*E3(1,1)^2-2*E2(3,3)*E3(2,1)*E3(3,3)+2*E2(2,3)*E3(3,1)*E3(3,3)+2*E2(3,2)*E3(3,1)*E3(2,2)+2*E2(2,2)*E3(3,1)*E3(3,2)-2*E2(1,2)*E3(2,1)*E3(1,2)+2*E2(3,1)*E3(2,1)*E3(3,1)-2*E2(3,2)*E3(2,1)*E3(3,2)+2*E2(2,3)*E3(2,1)*E3(2,3)-2*E2(1,3)*E3(2,1)*E3(1,3)+2*E2(3,3)*E3(3,1)*E3(2,3)+2*E2(1,3)*E3(1,1)*E3(2,3)+2*E2(2,2)*E3(2,1)*E3(2,2)-E2(2,1)*E3(3,3)^2+2*E2(2,2)*E3(1,1)*E3(1,2)+2*E2(2,3)*E3(1,1)*E3(1,3)+2*E2(1,1)*E3(1,3)*E3(2,3)+2*E2(1,2)*E3(1,1)*E3(2,2)+2*E2(3,1)*E3(2,3)*E3(3,3)+2*E2(3,1)*E3(3,2)*E3(2,2)+2*E2(1,1)*E3(1,1)*E3(2,1)+2*E2(1,1)*E3(1,2)*E3(2,2);
Mcoefs(84) = 2*E2(3,2)*E3(1,1)*E3(1,2)+2*E2(3,2)*E3(2,1)*E3(2,2)+2*E2(1,3)*E3(1,1)*E3(3,3)+2*E2(1,1)*E3(1,3)*E3(3,3)-2*E2(1,3)*E3(3,1)*E3(1,3)+2*E2(2,1)*E3(2,1)*E3(3,1)+2*E2(1,2)*E3(1,1)*E3(3,2)-2*E2(2,3)*E3(3,1)*E3(2,3)-2*E2(2,2)*E3(3,1)*E3(2,2)+2*E2(2,3)*E3(2,1)*E3(3,3)+2*E2(3,2)*E3(3,1)*E3(3,2)+2*E2(1,1)*E3(1,1)*E3(3,1)+2*E2(3,3)*E3(1,1)*E3(1,3)-2*E2(1,2)*E3(3,1)*E3(1,2)+2*E2(3,3)*E3(2,1)*E3(2,3)+2*E2(2,2)*E3(2,1)*E3(3,2)-E2(3,1)*E3(2,2)^2+2*E2(3,3)*E3(3,1)*E3(3,3)+2*E2(2,1)*E3(3,2)*E3(2,2)+2*E2(2,1)*E3(2,3)*E3(3,3)+2*E2(1,1)*E3(1,2)*E3(3,2)+3*E2(3,1)*E3(3,1)^2-E2(3,1)*E3(1,2)^2-E2(3,1)*E3(1,3)^2+E2(3,1)*E3(2,1)^2+E2(3,1)*E3(3,3)^2+E2(3,1)*E3(3,2)^2-E2(3,1)*E3(2,3)^2+E2(3,1)*E3(1,1)^2;
Mcoefs(85) = -E2(1,2)*E3(2,1)^2+E2(1,2)*E3(1,1)^2+3*E2(1,2)*E3(1,2)^2+E2(1,2)*E3(1,3)^2+E2(1,2)*E3(2,2)^2+E2(1,2)*E3(3,2)^2-E2(1,2)*E3(2,3)^2+2*E2(2,3)*E3(2,2)*E3(1,3)+2*E2(3,2)*E3(1,2)*E3(3,2)+2*E2(1,1)*E3(2,1)*E3(2,2)+2*E2(2,1)*E3(1,1)*E3(2,2)-2*E2(2,1)*E3(2,1)*E3(1,2)+2*E2(3,3)*E3(3,2)*E3(1,3)+2*E2(3,2)*E3(1,3)*E3(3,3)+2*E2(1,1)*E3(3,1)*E3(3,2)+2*E2(3,1)*E3(1,1)*E3(3,2)+2*E2(1,3)*E3(1,2)*E3(1,3)+2*E2(1,1)*E3(1,1)*E3(1,2)+2*E2(1,3)*E3(3,2)*E3(3,3)+2*E2(2,2)*E3(1,2)*E3(2,2)+2*E2(1,3)*E3(2,2)*E3(2,3)-2*E2(3,1)*E3(3,1)*E3(1,2)+2*E2(3,2)*E3(1,1)*E3(3,1)-2*E2(2,3)*E3(1,2)*E3(2,3)+2*E2(2,2)*E3(1,1)*E3(2,1)+2*E2(2,2)*E3(1,3)*E3(2,3)-2*E2(3,3)*E3(1,2)*E3(3,3)-E2(1,2)*E3(3,3)^2-E2(1,2)*E3(3,1)^2;
Mcoefs(86) = 2*E2(2,1)*E3(2,1)*E3(2,2)+2*E2(2,1)*E3(3,1)*E3(3,2)+2*E2(3,1)*E3(2,1)*E3(3,2)+2*E2(1,1)*E3(2,1)*E3(1,2)+2*E2(1,3)*E3(1,2)*E3(2,3)+2*E2(2,3)*E3(2,2)*E3(2,3)+2*E2(2,3)*E3(1,2)*E3(1,3)-2*E2(3,3)*E3(2,2)*E3(3,3)+2*E2(3,3)*E3(3,2)*E3(2,3)-2*E2(3,1)*E3(3,1)*E3(2,2)+2*E2(3,2)*E3(2,1)*E3(3,1)+2*E2(1,2)*E3(1,3)*E3(2,3)+2*E2(3,2)*E3(2,3)*E3(3,3)-2*E2(1,3)*E3(2,2)*E3(1,3)+2*E2(3,2)*E3(3,2)*E3(2,2)+2*E2(1,2)*E3(1,2)*E3(2,2)-2*E2(1,1)*E3(1,1)*E3(2,2)+2*E2(2,3)*E3(3,2)*E3(3,3)+2*E2(2,1)*E3(1,1)*E3(1,2)+2*E2(1,2)*E3(1,1)*E3(2,1)+E2(2,2)*E3(1,2)^2+E2(2,2)*E3(3,2)^2-E2(2,2)*E3(1,3)^2+E2(2,2)*E3(2,1)^2-E2(2,2)*E3(3,1)^2-E2(2,2)*E3(1,1)^2-E2(2,2)*E3(3,3)^2+3*E2(2,2)*E3(2,2)^2+E2(2,2)*E3(2,3)^2;
Mcoefs(87) = -E2(3,2)*E3(1,3)^2-E2(3,2)*E3(1,1)^2-E2(3,2)*E3(2,1)^2+E2(3,2)*E3(3,3)^2+3*E2(3,2)*E3(3,2)^2+E2(3,2)*E3(3,1)^2+E2(3,2)*E3(1,2)^2-E2(3,2)*E3(2,3)^2+E2(3,2)*E3(2,2)^2+2*E2(1,3)*E3(1,2)*E3(3,3)+2*E2(3,3)*E3(1,2)*E3(1,3)+2*E2(1,2)*E3(1,3)*E3(3,3)+2*E2(2,3)*E3(2,2)*E3(3,3)+2*E2(3,1)*E3(3,1)*E3(3,2)+2*E2(1,2)*E3(1,2)*E3(3,2)+2*E2(3,3)*E3(2,2)*E3(2,3)+2*E2(2,1)*E3(3,1)*E3(2,2)+2*E2(3,3)*E3(3,2)*E3(3,3)+2*E2(3,1)*E3(2,1)*E3(2,2)+2*E2(2,2)*E3(3,2)*E3(2,2)-2*E2(1,3)*E3(3,2)*E3(1,3)+2*E2(2,2)*E3(2,3)*E3(3,3)-2*E2(2,3)*E3(3,2)*E3(2,3)-2*E2(2,1)*E3(2,1)*E3(3,2)+2*E2(2,2)*E3(2,1)*E3(3,1)+2*E2(1,1)*E3(3,1)*E3(1,2)+2*E2(3,1)*E3(1,1)*E3(1,2)+2*E2(1,2)*E3(1,1)*E3(3,1)-2*E2(1,1)*E3(1,1)*E3(3,2);
Mcoefs(88) = 2*E2(2,3)*E3(1,2)*E3(2,2)+2*E2(1,1)*E3(3,1)*E3(3,3)+2*E2(3,1)*E3(1,1)*E3(3,3)+2*E2(1,2)*E3(3,2)*E3(3,3)-E2(1,3)*E3(2,2)^2+E2(1,3)*E3(2,3)^2+2*E2(1,1)*E3(1,1)*E3(1,3)+2*E2(1,2)*E3(1,2)*E3(1,3)+2*E2(3,3)*E3(1,3)*E3(3,3)-2*E2(3,1)*E3(3,1)*E3(1,3)+2*E2(3,3)*E3(1,1)*E3(3,1)+2*E2(1,1)*E3(2,1)*E3(2,3)+2*E2(2,1)*E3(1,1)*E3(2,3)+2*E2(2,3)*E3(1,1)*E3(2,1)-2*E2(3,2)*E3(3,2)*E3(1,3)-2*E2(2,1)*E3(2,1)*E3(1,3)-2*E2(2,2)*E3(2,2)*E3(1,3)+2*E2(3,3)*E3(1,2)*E3(3,2)+2*E2(1,2)*E3(2,2)*E3(2,3)+2*E2(2,2)*E3(1,2)*E3(2,3)+2*E2(2,3)*E3(1,3)*E3(2,3)+2*E2(3,2)*E3(1,2)*E3(3,3)-E2(1,3)*E3(3,1)^2-E2(1,3)*E3(3,2)^2-E2(1,3)*E3(2,1)^2+3*E2(1,3)*E3(1,3)^2+E2(1,3)*E3(3,3)^2+E2(1,3)*E3(1,1)^2+E2(1,3)*E3(1,2)^2;
Mcoefs(89) = E2(2,3)*E3(2,1)^2+E2(2,3)*E3(2,2)^2+E2(2,3)*E3(1,3)^2-E2(2,3)*E3(1,1)^2+E2(2,3)*E3(3,3)^2+3*E2(2,3)*E3(2,3)^2-E2(2,3)*E3(3,1)^2-E2(2,3)*E3(3,2)^2-E2(2,3)*E3(1,2)^2+2*E2(1,3)*E3(1,2)*E3(2,2)-2*E2(1,1)*E3(1,1)*E3(2,3)-2*E2(1,2)*E3(1,2)*E3(2,3)+2*E2(2,1)*E3(2,1)*E3(2,3)+2*E2(1,3)*E3(1,3)*E3(2,3)+2*E2(2,2)*E3(2,2)*E3(2,3)-2*E2(3,1)*E3(3,1)*E3(2,3)+2*E2(1,1)*E3(2,1)*E3(1,3)+2*E2(2,1)*E3(1,1)*E3(1,3)+2*E2(2,2)*E3(3,2)*E3(3,3)+2*E2(3,2)*E3(2,2)*E3(3,3)+2*E2(3,3)*E3(2,1)*E3(3,1)-2*E2(3,2)*E3(3,2)*E3(2,3)+2*E2(2,1)*E3(3,1)*E3(3,3)+2*E2(3,1)*E3(2,1)*E3(3,3)+2*E2(3,3)*E3(3,2)*E3(2,2)+2*E2(3,3)*E3(2,3)*E3(3,3)+2*E2(1,3)*E3(1,1)*E3(2,1)+2*E2(1,2)*E3(2,2)*E3(1,3)+2*E2(2,2)*E3(1,2)*E3(1,3);
Mcoefs(90) = -E2(3,3)*E3(2,2)^2+E2(3,3)*E3(3,2)^2-E2(3,3)*E3(1,1)^2+3*E2(3,3)*E3(3,3)^2-E2(3,3)*E3(1,2)^2+2*E2(1,3)*E3(1,1)*E3(3,1)+2*E2(1,1)*E3(3,1)*E3(1,3)+E2(3,3)*E3(2,3)^2+2*E2(3,1)*E3(1,1)*E3(1,3)-2*E2(2,2)*E3(2,2)*E3(3,3)+2*E2(1,2)*E3(3,2)*E3(1,3)-2*E2(2,1)*E3(2,1)*E3(3,3)-E2(3,3)*E3(2,1)^2+E2(3,3)*E3(3,1)^2+E2(3,3)*E3(1,3)^2+2*E2(2,3)*E3(2,1)*E3(3,1)+2*E2(1,3)*E3(1,3)*E3(3,3)+2*E2(2,2)*E3(3,2)*E3(2,3)+2*E2(3,2)*E3(2,2)*E3(2,3)-2*E2(1,1)*E3(1,1)*E3(3,3)+2*E2(2,3)*E3(2,3)*E3(3,3)+2*E2(1,3)*E3(1,2)*E3(3,2)+2*E2(2,3)*E3(3,2)*E3(2,2)-2*E2(1,2)*E3(1,2)*E3(3,3)+2*E2(3,2)*E3(3,2)*E3(3,3)+2*E2(3,1)*E3(3,1)*E3(3,3)+2*E2(2,1)*E3(3,1)*E3(2,3)+2*E2(3,1)*E3(2,1)*E3(2,3)+2*E2(3,2)*E3(1,2)*E3(1,3);
% Coefficients 91-100: terms cubic in E3 alone.
% Mcoefs(91) is exactly the cofactor expansion of det(E3).
Mcoefs(91) = E3(2,1)*E3(3,2)*E3(1,3)-E3(2,1)*E3(1,2)*E3(3,3)-E3(1,1)*E3(3,2)*E3(2,3)+E3(1,1)*E3(2,2)*E3(3,3)+E3(3,1)*E3(1,2)*E3(2,3)-E3(3,1)*E3(2,2)*E3(1,3);
Mcoefs(92) = -E3(1,1)*E3(3,3)^2-E3(1,1)*E3(2,2)^2+E3(1,1)*E3(3,1)^2+E3(1,1)*E3(1,3)^2-E3(1,1)*E3(3,2)^2-E3(1,1)*E3(2,3)^2+E3(1,1)*E3(2,1)^2+E3(1,1)*E3(1,2)^2+E3(1,1)^3+2*E3(2,1)*E3(1,2)*E3(2,2)+2*E3(2,1)*E3(1,3)*E3(2,3)+2*E3(3,1)*E3(1,3)*E3(3,3)+2*E3(3,1)*E3(1,2)*E3(3,2);
Mcoefs(93) = 2*E3(3,1)*E3(3,2)*E3(2,2)+2*E3(1,1)*E3(1,3)*E3(2,3)+2*E3(1,1)*E3(1,2)*E3(2,2)+2*E3(3,1)*E3(2,3)*E3(3,3)+E3(2,1)*E3(2,2)^2+E3(2,1)*E3(2,3)^2+E3(1,1)^2*E3(2,1)+E3(2,1)*E3(3,1)^2-E3(2,1)*E3(1,3)^2-E3(2,1)*E3(1,2)^2-E3(2,1)*E3(3,3)^2-E3(2,1)*E3(3,2)^2+E3(2,1)^3;
Mcoefs(94) = E3(3,1)^3+2*E3(1,1)*E3(1,2)*E3(3,2)+2*E3(2,1)*E3(2,3)*E3(3,3)+2*E3(1,1)*E3(1,3)*E3(3,3)-E3(3,1)*E3(1,3)^2+E3(1,1)^2*E3(3,1)+E3(3,1)*E3(3,3)^2+E3(2,1)^2*E3(3,1)+E3(3,1)*E3(3,2)^2-E3(3,1)*E3(1,2)^2-E3(3,1)*E3(2,3)^2-E3(3,1)*E3(2,2)^2+2*E3(2,1)*E3(3,2)*E3(2,2);
Mcoefs(95) = E3(1,2)^3+2*E3(2,2)*E3(1,3)*E3(2,3)-E3(3,1)^2*E3(1,2)-E3(1,2)*E3(2,3)^2-E3(2,1)^2*E3(1,2)-E3(1,2)*E3(3,3)^2+E3(1,1)^2*E3(1,2)+E3(1,2)*E3(1,3)^2+E3(1,2)*E3(2,2)^2+E3(1,2)*E3(3,2)^2+2*E3(1,1)*E3(3,1)*E3(3,2)+2*E3(3,2)*E3(1,3)*E3(3,3)+2*E3(1,1)*E3(2,1)*E3(2,2);
Mcoefs(96) = E3(2,2)*E3(2,3)^2+E3(2,1)^2*E3(2,2)+2*E3(2,1)*E3(3,1)*E3(3,2)+2*E3(1,2)*E3(1,3)*E3(2,3)+2*E3(1,1)*E3(2,1)*E3(1,2)+2*E3(3,2)*E3(2,3)*E3(3,3)+E3(1,2)^2*E3(2,2)+E3(3,2)^2*E3(2,2)-E3(2,2)*E3(3,3)^2-E3(3,1)^2*E3(2,2)-E3(2,2)*E3(1,3)^2-E3(1,1)^2*E3(2,2)+E3(2,2)^3;
Mcoefs(97) = E3(3,2)^3+2*E3(2,1)*E3(3,1)*E3(2,2)+2*E3(2,2)*E3(2,3)*E3(3,3)+2*E3(1,2)*E3(1,3)*E3(3,3)+2*E3(1,1)*E3(3,1)*E3(1,2)+E3(3,1)^2*E3(3,2)+E3(3,2)*E3(3,3)^2-E3(3,2)*E3(1,3)^2-E3(1,1)^2*E3(3,2)+E3(1,2)^2*E3(3,2)-E3(2,1)^2*E3(3,2)+E3(3,2)*E3(2,2)^2-E3(3,2)*E3(2,3)^2;
Mcoefs(98) = -E3(3,1)^2*E3(1,3)-E3(3,2)^2*E3(1,3)-E3(2,1)^2*E3(1,3)-E3(2,2)^2*E3(1,3)+E3(1,3)*E3(2,3)^2+E3(1,3)*E3(3,3)^2+E3(1,1)^2*E3(1,3)+E3(1,2)^2*E3(1,3)+2*E3(1,1)*E3(3,1)*E3(3,3)+2*E3(1,1)*E3(2,1)*E3(2,3)+E3(1,3)^3+2*E3(1,2)*E3(3,2)*E3(3,3)+2*E3(1,2)*E3(2,2)*E3(2,3);
Mcoefs(99) = 2*E3(1,1)*E3(2,1)*E3(1,3)+2*E3(2,1)*E3(3,1)*E3(3,3)+2*E3(1,2)*E3(2,2)*E3(1,3)+E3(1,3)^2*E3(2,3)-E3(1,2)^2*E3(2,3)-E3(3,1)^2*E3(2,3)-E3(3,2)^2*E3(2,3)-E3(1,1)^2*E3(2,3)+E3(2,1)^2*E3(2,3)+E3(2,2)^2*E3(2,3)+E3(2,3)*E3(3,3)^2+E3(2,3)^3+2*E3(3,2)*E3(2,2)*E3(3,3);
Mcoefs(100) = -E3(2,1)^2*E3(3,3)+E3(1,3)^2*E3(3,3)-E3(2,2)^2*E3(3,3)+E3(2,3)^2*E3(3,3)+2*E3(1,2)*E3(3,2)*E3(1,3)+2*E3(1,1)*E3(3,1)*E3(1,3)+E3(3,3)^3+2*E3(2,1)*E3(3,1)*E3(2,3)-E3(1,2)^2*E3(3,3)-E3(1,1)^2*E3(3,3)+E3(3,1)^2*E3(3,3)+2*E3(3,2)*E3(2,2)*E3(2,3)+E3(3,2)^2*E3(3,3);
% Coefficients 101-110: terms quadratic in E1 and linear in E4.
% Mcoefs(101) is the signed sum over permutations (determinant-like mixed form in E1, E1, E4).
Mcoefs(101) = E1(1,1)*E1(2,2)*E4(3,3)-E1(1,2)*E1(3,3)*E4(2,1)-E1(3,1)*E1(2,2)*E4(1,3)+E1(1,2)*E1(2,3)*E4(3,1)+E1(2,1)*E1(1,3)*E4(3,2)+E1(3,1)*E1(1,2)*E4(2,3)+E1(2,2)*E1(3,3)*E4(1,1)-E1(1,1)*E1(2,3)*E4(3,2)-E1(1,1)*E1(3,2)*E4(2,3)-E1(3,2)*E1(2,3)*E4(1,1)-E1(2,1)*E1(1,2)*E4(3,3)-E1(2,1)*E1(3,3)*E4(1,2)+E1(3,1)*E1(2,3)*E4(1,2)+E1(2,1)*E1(3,2)*E4(1,3)+E1(1,1)*E1(3,3)*E4(2,2)-E1(3,1)*E1(1,3)*E4(2,2)-E1(2,2)*E1(1,3)*E4(3,1)+E1(3,2)*E1(1,3)*E4(2,1);
Mcoefs(102) = 2*E1(3,1)*E1(3,2)*E4(1,2)+2*E1(3,1)*E1(1,2)*E4(3,2)+2*E1(1,3)*E1(3,3)*E4(3,1)+3*E1(1,1)^2*E4(1,1)+2*E1(1,2)*E1(3,2)*E4(3,1)+2*E1(1,1)*E1(3,1)*E4(3,1)-2*E1(1,1)*E1(2,3)*E4(2,3)-2*E1(1,1)*E1(3,3)*E4(3,3)-2*E1(1,1)*E1(3,2)*E4(3,2)+2*E1(1,1)*E1(2,1)*E4(2,1)-2*E1(1,1)*E1(2,2)*E4(2,2)+2*E1(1,2)*E1(2,2)*E4(2,1)+2*E1(1,3)*E1(2,3)*E4(2,1)+2*E1(2,1)*E1(2,3)*E4(1,3)+2*E1(2,1)*E1(2,2)*E4(1,2)+2*E1(2,1)*E1(1,2)*E4(2,2)+2*E1(2,1)*E1(1,3)*E4(2,3)+2*E1(1,1)*E1(1,3)*E4(1,3)+2*E1(1,1)*E1(1,2)*E4(1,2)+2*E1(3,1)*E1(3,3)*E4(1,3)+2*E1(3,1)*E1(1,3)*E4(3,3)+E1(1,3)^2*E4(1,1)+E1(1,2)^2*E4(1,1)-E1(2,2)^2*E4(1,1)+E1(2,1)^2*E4(1,1)-E1(3,3)^2*E4(1,1)-E1(3,2)^2*E4(1,1)-E1(2,3)^2*E4(1,1)+E1(3,1)^2*E4(1,1);
Mcoefs(103) = 3*E1(2,1)^2*E4(2,1)+2*E1(3,1)*E1(3,3)*E4(2,3)+2*E1(3,1)*E1(2,3)*E4(3,3)+E1(2,3)^2*E4(2,1)+E1(2,2)^2*E4(2,1)+E1(3,1)^2*E4(2,1)-E1(1,2)^2*E4(2,1)+2*E1(2,3)*E1(3,3)*E4(3,1)+2*E1(3,1)*E1(3,2)*E4(2,2)+E1(1,1)^2*E4(2,1)+2*E1(3,2)*E1(2,2)*E4(3,1)+2*E1(3,1)*E1(2,2)*E4(3,2)-E1(1,3)^2*E4(2,1)-E1(3,2)^2*E4(2,1)-2*E1(2,1)*E1(3,3)*E4(3,3)-2*E1(2,1)*E1(3,2)*E4(3,2)+2*E1(1,1)*E1(2,3)*E4(1,3)+2*E1(2,1)*E1(3,1)*E4(3,1)+2*E1(1,1)*E1(2,2)*E4(1,2)+2*E1(1,1)*E1(1,3)*E4(2,3)+2*E1(1,1)*E1(2,1)*E4(1,1)+2*E1(1,1)*E1(1,2)*E4(2,2)+2*E1(1,2)*E1(2,2)*E4(1,1)+2*E1(1,3)*E1(2,3)*E4(1,1)+2*E1(2,1)*E1(2,2)*E4(2,2)+2*E1(2,1)*E1(2,3)*E4(2,3)-2*E1(2,1)*E1(1,2)*E4(1,2)-2*E1(2,1)*E1(1,3)*E4(1,3)-E1(3,3)^2*E4(2,1);
Mcoefs(104) = 2*E1(2,1)*E1(2,2)*E4(3,2)-2*E1(3,1)*E1(2,3)*E4(2,3)-2*E1(3,1)*E1(1,2)*E4(1,2)+2*E1(1,1)*E1(3,1)*E4(1,1)+2*E1(2,1)*E1(3,2)*E4(2,2)+E1(2,1)^2*E4(3,1)+E1(1,1)^2*E4(3,1)+E1(3,2)^2*E4(3,1)+3*E1(3,1)^2*E4(3,1)-E1(2,3)^2*E4(3,1)-E1(2,2)^2*E4(3,1)-E1(1,2)^2*E4(3,1)+E1(3,3)^2*E4(3,1)-E1(1,3)^2*E4(3,1)-2*E1(3,1)*E1(2,2)*E4(2,2)+2*E1(2,1)*E1(3,3)*E4(2,3)+2*E1(1,1)*E1(1,3)*E4(3,3)+2*E1(2,1)*E1(3,1)*E4(2,1)+2*E1(2,3)*E1(3,3)*E4(2,1)+2*E1(2,1)*E1(2,3)*E4(3,3)+2*E1(3,1)*E1(3,2)*E4(3,2)+2*E1(3,2)*E1(2,2)*E4(2,1)-2*E1(3,1)*E1(1,3)*E4(1,3)+2*E1(1,2)*E1(3,2)*E4(1,1)+2*E1(1,3)*E1(3,3)*E4(1,1)+2*E1(1,1)*E1(3,3)*E4(1,3)+2*E1(3,1)*E1(3,3)*E4(3,3)+2*E1(1,1)*E1(3,2)*E4(1,2)+2*E1(1,1)*E1(1,2)*E4(3,2);
Mcoefs(105) = 2*E1(1,3)*E1(3,3)*E4(3,2)+2*E1(2,1)*E1(2,2)*E4(1,1)+2*E1(1,1)*E1(2,2)*E4(2,1)-2*E1(1,2)*E1(3,3)*E4(3,3)+2*E1(3,2)*E1(3,3)*E4(1,3)+2*E1(1,3)*E1(2,3)*E4(2,2)-2*E1(1,2)*E1(2,3)*E4(2,3)-2*E1(2,1)*E1(1,2)*E4(2,1)+2*E1(1,2)*E1(3,2)*E4(3,2)-2*E1(3,1)*E1(1,2)*E4(3,1)+2*E1(1,1)*E1(3,1)*E4(3,2)+2*E1(1,1)*E1(3,2)*E4(3,1)+2*E1(1,1)*E1(2,1)*E4(2,2)+2*E1(1,1)*E1(1,2)*E4(1,1)+2*E1(1,2)*E1(1,3)*E4(1,3)+2*E1(3,1)*E1(3,2)*E4(1,1)+2*E1(2,2)*E1(1,3)*E4(2,3)+2*E1(1,2)*E1(2,2)*E4(2,2)+2*E1(3,2)*E1(1,3)*E4(3,3)+2*E1(2,2)*E1(2,3)*E4(1,3)+E1(1,3)^2*E4(1,2)+E1(3,2)^2*E4(1,2)-E1(2,3)^2*E4(1,2)-E1(3,3)^2*E4(1,2)-E1(2,1)^2*E4(1,2)-E1(3,1)^2*E4(1,2)+3*E1(1,2)^2*E4(1,2)+E1(2,2)^2*E4(1,2)+E1(1,1)^2*E4(1,2);
Mcoefs(106) = E1(2,1)^2*E4(2,2)+E1(2,3)^2*E4(2,2)-E1(3,3)^2*E4(2,2)-E1(1,3)^2*E4(2,2)-E1(3,1)^2*E4(2,2)+E1(3,2)^2*E4(2,2)+3*E1(2,2)^2*E4(2,2)+E1(1,2)^2*E4(2,2)-E1(1,1)^2*E4(2,2)+2*E1(3,2)*E1(3,3)*E4(2,3)+2*E1(1,3)*E1(2,3)*E4(1,2)+2*E1(1,1)*E1(1,2)*E4(2,1)-2*E1(2,2)*E1(3,3)*E4(3,3)+2*E1(3,2)*E1(2,3)*E4(3,3)-2*E1(2,2)*E1(1,3)*E4(1,3)+2*E1(3,2)*E1(2,2)*E4(3,2)+2*E1(2,1)*E1(2,2)*E4(2,1)+2*E1(2,2)*E1(2,3)*E4(2,3)+2*E1(3,1)*E1(3,2)*E4(2,1)+2*E1(2,1)*E1(3,2)*E4(3,1)+2*E1(1,2)*E1(1,3)*E4(2,3)-2*E1(3,1)*E1(2,2)*E4(3,1)+2*E1(2,3)*E1(3,3)*E4(3,2)+2*E1(2,1)*E1(3,1)*E4(3,2)+2*E1(1,2)*E1(2,3)*E4(1,3)+2*E1(2,1)*E1(1,2)*E4(1,1)+2*E1(1,2)*E1(2,2)*E4(1,2)+2*E1(1,1)*E1(2,1)*E4(1,2)-2*E1(1,1)*E1(2,2)*E4(1,1);
Mcoefs(107) = E1(3,3)^2*E4(3,2)+2*E1(3,1)*E1(3,2)*E4(3,1)+2*E1(2,1)*E1(3,1)*E4(2,2)+2*E1(3,1)*E1(2,2)*E4(2,1)+2*E1(2,2)*E1(2,3)*E4(3,3)-2*E1(1,1)*E1(3,2)*E4(1,1)+2*E1(2,2)*E1(3,3)*E4(2,3)+2*E1(3,1)*E1(1,2)*E4(1,1)+2*E1(1,1)*E1(1,2)*E4(3,1)-2*E1(3,2)*E1(1,3)*E4(1,3)-2*E1(2,1)*E1(3,2)*E4(2,1)+2*E1(1,3)*E1(3,3)*E4(1,2)+2*E1(1,1)*E1(3,1)*E4(1,2)-2*E1(3,2)*E1(2,3)*E4(2,3)+2*E1(1,2)*E1(1,3)*E4(3,3)+2*E1(1,2)*E1(3,3)*E4(1,3)+2*E1(1,2)*E1(3,2)*E4(1,2)+2*E1(2,1)*E1(2,2)*E4(3,1)+2*E1(3,2)*E1(3,3)*E4(3,3)+2*E1(3,2)*E1(2,2)*E4(2,2)+2*E1(2,3)*E1(3,3)*E4(2,2)+E1(3,1)^2*E4(3,2)+E1(2,2)^2*E4(3,2)-E1(2,1)^2*E4(3,2)-E1(1,3)^2*E4(3,2)+E1(1,2)^2*E4(3,2)+3*E1(3,2)^2*E4(3,2)-E1(1,1)^2*E4(3,2)-E1(2,3)^2*E4(3,2);
Mcoefs(108) = 2*E1(3,2)*E1(3,3)*E4(1,2)+2*E1(1,1)*E1(3,3)*E4(3,1)-2*E1(3,1)*E1(1,3)*E4(3,1)-2*E1(3,2)*E1(1,3)*E4(3,2)-2*E1(2,1)*E1(1,3)*E4(2,1)+2*E1(1,1)*E1(2,1)*E4(2,3)-2*E1(2,2)*E1(1,3)*E4(2,2)+2*E1(3,1)*E1(3,3)*E4(1,1)+2*E1(2,1)*E1(2,3)*E4(1,1)+2*E1(1,2)*E1(2,3)*E4(2,2)+2*E1(2,2)*E1(2,3)*E4(1,2)+2*E1(1,1)*E1(2,3)*E4(2,1)+2*E1(1,3)*E1(3,3)*E4(3,3)+2*E1(1,1)*E1(3,1)*E4(3,3)+2*E1(1,2)*E1(2,2)*E4(2,3)+2*E1(1,2)*E1(1,3)*E4(1,2)+2*E1(1,3)*E1(2,3)*E4(2,3)+2*E1(1,2)*E1(3,2)*E4(3,3)+2*E1(1,1)*E1(1,3)*E4(1,1)+2*E1(1,2)*E1(3,3)*E4(3,2)-E1(2,1)^2*E4(1,3)-E1(3,2)^2*E4(1,3)-E1(3,1)^2*E4(1,3)-E1(2,2)^2*E4(1,3)+E1(3,3)^2*E4(1,3)+E1(2,3)^2*E4(1,3)+3*E1(1,3)^2*E4(1,3)+E1(1,1)^2*E4(1,3)+E1(1,2)^2*E4(1,3);
Mcoefs(109) = -E1(3,1)^2*E4(2,3)-2*E1(1,2)*E1(2,3)*E4(1,2)-2*E1(3,1)*E1(2,3)*E4(3,1)-E1(3,2)^2*E4(2,3)-E1(1,1)^2*E4(2,3)+3*E1(2,3)^2*E4(2,3)+E1(1,3)^2*E4(2,3)+2*E1(2,1)*E1(3,3)*E4(3,1)+2*E1(3,1)*E1(3,3)*E4(2,1)-E1(1,2)^2*E4(2,3)+2*E1(2,1)*E1(1,3)*E4(1,1)+2*E1(1,2)*E1(1,3)*E4(2,2)-2*E1(3,2)*E1(2,3)*E4(3,2)+2*E1(2,2)*E1(1,3)*E4(1,2)+2*E1(1,1)*E1(1,3)*E4(2,1)+2*E1(1,1)*E1(2,1)*E4(1,3)+2*E1(1,2)*E1(2,2)*E4(1,3)+2*E1(1,3)*E1(2,3)*E4(1,3)+2*E1(2,2)*E1(3,3)*E4(3,2)+2*E1(2,1)*E1(2,3)*E4(2,1)+2*E1(2,3)*E1(3,3)*E4(3,3)+2*E1(2,1)*E1(3,1)*E4(3,3)+2*E1(3,2)*E1(3,3)*E4(2,2)+2*E1(3,2)*E1(2,2)*E4(3,3)-2*E1(1,1)*E1(2,3)*E4(1,1)+2*E1(2,2)*E1(2,3)*E4(2,2)+E1(3,3)^2*E4(2,3)+E1(2,2)^2*E4(2,3)+E1(2,1)^2*E4(2,3);
Mcoefs(110) = -E1(1,1)^2*E4(3,3)+3*E1(3,3)^2*E4(3,3)+E1(3,2)^2*E4(3,3)-E1(2,1)^2*E4(3,3)+E1(1,3)^2*E4(3,3)+E1(2,3)^2*E4(3,3)-E1(1,2)^2*E4(3,3)-E1(2,2)^2*E4(3,3)+2*E1(3,1)*E1(1,3)*E4(1,1)+2*E1(1,2)*E1(1,3)*E4(3,2)+2*E1(2,3)*E1(3,3)*E4(2,3)+2*E1(3,2)*E1(2,3)*E4(2,2)+E1(3,1)^2*E4(3,3)-2*E1(2,2)*E1(3,3)*E4(2,2)-2*E1(1,1)*E1(3,3)*E4(1,1)+2*E1(3,2)*E1(3,3)*E4(3,2)+2*E1(2,2)*E1(2,3)*E4(3,2)+2*E1(3,2)*E1(2,2)*E4(2,3)+2*E1(2,1)*E1(3,1)*E4(2,3)+2*E1(3,2)*E1(1,3)*E4(1,2)+2*E1(2,1)*E1(2,3)*E4(3,1)+2*E1(3,1)*E1(2,3)*E4(2,1)+2*E1(1,3)*E1(3,3)*E4(1,3)+2*E1(1,1)*E1(3,1)*E4(1,3)+2*E1(1,2)*E1(3,2)*E4(1,3)-2*E1(1,2)*E1(3,3)*E4(1,2)+2*E1(3,1)*E1(3,3)*E4(3,1)-2*E1(2,1)*E1(3,3)*E4(2,1)+2*E1(1,1)*E1(1,3)*E4(3,1);
Mcoefs(111) = -E1(3,2)*E2(2,3)*E4(1,1)-E1(2,1)*E2(1,2)*E4(3,3)-E1(2,1)*E2(3,3)*E4(1,2)+E1(3,1)*E2(2,3)*E4(1,2)+E1(2,3)*E2(1,2)*E4(3,1)+E1(2,1)*E2(3,2)*E4(1,3)-E1(1,1)*E2(2,3)*E4(3,2)-E1(1,1)*E2(3,2)*E4(2,3)-E1(2,3)*E2(1,1)*E4(3,2)-E1(3,2)*E2(1,1)*E4(2,3)-E1(3,3)*E2(2,1)*E4(1,2)-E1(3,1)*E2(1,3)*E4(2,2)-E1(3,1)*E2(2,2)*E4(1,3)-E1(2,2)*E2(3,1)*E4(1,3)-E1(1,3)*E2(3,1)*E4(2,2)+E1(1,2)*E2(2,3)*E4(3,1)-E1(3,3)*E2(1,2)*E4(2,1)-E1(1,2)*E2(3,3)*E4(2,1)-E1(2,2)*E2(1,3)*E4(3,1)+E1(1,1)*E2(2,2)*E4(3,3)+E1(3,2)*E2(2,1)*E4(1,3)+E1(3,3)*E2(2,2)*E4(1,1)-E1(1,3)*E2(2,2)*E4(3,1)+E1(1,2)*E2(3,1)*E4(2,3)-E1(1,2)*E2(2,1)*E4(3,3)+E1(2,1)*E2(1,3)*E4(3,2)+E1(2,2)*E2(3,3)*E4(1,1)+E1(1,1)*E2(3,3)*E4(2,2)+E1(1,3)*E2(2,1)*E4(3,2)+E1(3,2)*E2(1,3)*E4(2,1)+E1(3,3)*E2(1,1)*E4(2,2)+E1(1,3)*E2(3,2)*E4(2,1)+E1(2,3)*E2(3,1)*E4(1,2)+E1(3,1)*E2(1,2)*E4(2,3)+E1(2,2)*E2(1,1)*E4(3,3)-E1(2,3)*E2(3,2)*E4(1,1);
Mcoefs(112) = 2*E1(3,1)*E2(3,2)*E4(1,2)+2*E1(1,3)*E2(2,1)*E4(2,3)-2*E1(2,3)*E2(1,1)*E4(2,3)+2*E1(3,1)*E2(3,3)*E4(1,3)+2*E1(3,3)*E2(3,1)*E4(1,3)+2*E1(3,1)*E2(1,3)*E4(3,3)+2*E1(2,2)*E2(1,2)*E4(2,1)+2*E1(1,2)*E2(2,2)*E4(2,1)+2*E1(1,3)*E2(2,3)*E4(2,1)+2*E1(2,1)*E2(1,1)*E4(2,1)+2*E1(1,2)*E2(1,2)*E4(1,1)+2*E1(2,1)*E2(1,3)*E4(2,3)+2*E1(2,1)*E2(2,2)*E4(1,2)+2*E1(2,2)*E2(2,1)*E4(1,2)+2*E1(2,1)*E2(1,2)*E4(2,2)+2*E1(2,1)*E2(2,1)*E4(1,1)+2*E1(2,3)*E2(1,3)*E4(2,1)-2*E1(2,3)*E2(2,3)*E4(1,1)-2*E1(2,2)*E2(2,2)*E4(1,1)+2*E1(1,2)*E2(2,1)*E4(2,2)+2*E1(3,2)*E2(3,1)*E4(1,2)-2*E1(1,1)*E2(2,2)*E4(2,2)-2*E1(3,2)*E2(1,1)*E4(3,2)-2*E1(2,2)*E2(1,1)*E4(2,2)+2*E1(1,3)*E2(1,1)*E4(1,3)+2*E1(1,1)*E2(1,3)*E4(1,3)+6*E1(1,1)*E2(1,1)*E4(1,1)+2*E1(1,2)*E2(1,1)*E4(1,2)-2*E1(3,3)*E2(1,1)*E4(3,3)-2*E1(3,3)*E2(3,3)*E4(1,1)-2*E1(3,2)*E2(3,2)*E4(1,1)+2*E1(1,3)*E2(1,3)*E4(1,1)+2*E1(1,1)*E2(1,2)*E4(1,2)+2*E1(2,1)*E2(2,3)*E4(1,3)+2*E1(2,3)*E2(2,1)*E4(1,3)-2*E1(1,1)*E2(2,3)*E4(2,3)+2*E1(3,1)*E2(1,2)*E4(3,2)+2*E1(1,2)*E2(3,1)*E4(3,2)+2*E1(3,1)*E2(3,1)*E4(1,1)+2*E1(3,1)*E2(1,1)*E4(3,1)+2*E1(3,3)*E2(1,3)*E4(3,1)+2*E1(1,3)*E2(3,3)*E4(3,1)+2*E1(3,2)*E2(1,2)*E4(3,1)+2*E1(1,2)*E2(3,2)*E4(3,1)+2*E1(1,3)*E2(3,1)*E4(3,3)+2*E1(1,1)*E2(3,1)*E4(3,1)+2*E1(1,1)*E2(2,1)*E4(2,1)-2*E1(1,1)*E2(3,3)*E4(3,3)-2*E1(1,1)*E2(3,2)*E4(3,2);
Mcoefs(113) = 2*E1(1,1)*E2(1,1)*E4(2,1)+2*E1(3,1)*E2(3,1)*E4(2,1)+2*E1(2,1)*E2(1,1)*E4(1,1)-2*E1(1,2)*E2(2,1)*E4(1,2)-2*E1(3,3)*E2(2,1)*E4(3,3)-2*E1(2,1)*E2(3,3)*E4(3,3)-2*E1(3,2)*E2(3,2)*E4(2,1)-2*E1(1,2)*E2(1,2)*E4(2,1)-2*E1(3,2)*E2(2,1)*E4(3,2)+6*E1(2,1)*E2(2,1)*E4(2,1)+2*E1(2,2)*E2(2,1)*E4(2,2)+2*E1(1,3)*E2(1,1)*E4(2,3)+2*E1(2,2)*E2(2,2)*E4(2,1)+2*E1(1,1)*E2(2,3)*E4(1,3)+2*E1(1,2)*E2(2,2)*E4(1,1)+2*E1(2,2)*E2(1,2)*E4(1,1)-2*E1(2,1)*E2(1,2)*E4(1,2)-2*E1(2,1)*E2(3,2)*E4(3,2)-2*E1(2,1)*E2(1,3)*E4(1,3)-2*E1(1,3)*E2(2,1)*E4(1,3)+2*E1(2,3)*E2(2,3)*E4(2,1)+2*E1(2,3)*E2(1,1)*E4(1,3)+2*E1(1,1)*E2(1,3)*E4(2,3)+2*E1(3,3)*E2(3,1)*E4(2,3)+2*E1(3,1)*E2(3,3)*E4(2,3)-2*E1(1,3)*E2(1,3)*E4(2,1)+2*E1(2,3)*E2(3,1)*E4(3,3)+2*E1(2,1)*E2(2,2)*E4(2,2)+2*E1(2,3)*E2(2,1)*E4(2,3)+2*E1(2,1)*E2(2,3)*E4(2,3)+2*E1(2,3)*E2(1,3)*E4(1,1)+2*E1(1,3)*E2(2,3)*E4(1,1)+2*E1(1,1)*E2(1,2)*E4(2,2)+2*E1(3,1)*E2(2,2)*E4(3,2)+2*E1(2,2)*E2(3,2)*E4(3,1)+2*E1(3,2)*E2(2,2)*E4(3,1)+2*E1(3,3)*E2(2,3)*E4(3,1)+2*E1(2,3)*E2(3,3)*E4(3,1)+2*E1(3,1)*E2(2,3)*E4(3,3)+2*E1(2,2)*E2(1,1)*E4(1,2)+2*E1(3,2)*E2(3,1)*E4(2,2)-2*E1(3,3)*E2(3,3)*E4(2,1)+2*E1(3,1)*E2(3,2)*E4(2,2)+2*E1(1,1)*E2(2,2)*E4(1,2)+2*E1(1,2)*E2(1,1)*E4(2,2)+2*E1(2,2)*E2(3,1)*E4(3,2)+2*E1(1,1)*E2(2,1)*E4(1,1)+2*E1(2,1)*E2(3,1)*E4(3,1)+2*E1(3,1)*E2(2,1)*E4(3,1);
Mcoefs(114) = 2*E1(3,2)*E2(3,2)*E4(3,1)+2*E1(2,3)*E2(3,3)*E4(2,1)+2*E1(3,3)*E2(2,3)*E4(2,1)+2*E1(1,1)*E2(3,1)*E4(1,1)+2*E1(2,1)*E2(3,3)*E4(2,3)+2*E1(3,3)*E2(2,1)*E4(2,3)+2*E1(3,2)*E2(3,1)*E4(3,2)+2*E1(3,1)*E2(3,2)*E4(3,2)+2*E1(2,1)*E2(2,2)*E4(3,2)-2*E1(1,2)*E2(3,1)*E4(1,2)-2*E1(2,2)*E2(2,2)*E4(3,1)+2*E1(3,1)*E2(2,1)*E4(2,1)-2*E1(3,1)*E2(1,2)*E4(1,2)-2*E1(2,3)*E2(2,3)*E4(3,1)+2*E1(1,1)*E2(1,2)*E4(3,2)+2*E1(2,2)*E2(2,1)*E4(3,2)+2*E1(1,2)*E2(1,1)*E4(3,2)+2*E1(3,2)*E2(1,1)*E4(1,2)+2*E1(2,1)*E2(2,1)*E4(3,1)+2*E1(3,2)*E2(2,1)*E4(2,2)+6*E1(3,1)*E2(3,1)*E4(3,1)+2*E1(3,3)*E2(3,1)*E4(3,3)-2*E1(3,1)*E2(2,3)*E4(2,3)+2*E1(3,1)*E2(3,3)*E4(3,3)+2*E1(3,3)*E2(1,1)*E4(1,3)+2*E1(3,1)*E2(1,1)*E4(1,1)+2*E1(3,2)*E2(1,2)*E4(1,1)+2*E1(3,3)*E2(1,3)*E4(1,1)+2*E1(1,2)*E2(3,2)*E4(1,1)-2*E1(2,2)*E2(3,1)*E4(2,2)-2*E1(3,1)*E2(2,2)*E4(2,2)+2*E1(1,1)*E2(1,1)*E4(3,1)-2*E1(1,2)*E2(1,2)*E4(3,1)+2*E1(1,3)*E2(3,3)*E4(1,1)-2*E1(1,3)*E2(3,1)*E4(1,3)+2*E1(2,1)*E2(3,1)*E4(2,1)+2*E1(1,1)*E2(1,3)*E4(3,3)-2*E1(3,1)*E2(1,3)*E4(1,3)+2*E1(1,1)*E2(3,3)*E4(1,3)+2*E1(1,1)*E2(3,2)*E4(1,2)+2*E1(1,3)*E2(1,1)*E4(3,3)-2*E1(1,3)*E2(1,3)*E4(3,1)-2*E1(2,3)*E2(3,1)*E4(2,3)+2*E1(2,1)*E2(3,2)*E4(2,2)+2*E1(3,2)*E2(2,2)*E4(2,1)+2*E1(2,3)*E2(2,1)*E4(3,3)+2*E1(2,1)*E2(2,3)*E4(3,3)+2*E1(3,3)*E2(3,3)*E4(3,1)+2*E1(2,2)*E2(3,2)*E4(2,1);
Mcoefs(115) = 2*E1(3,3)*E2(1,3)*E4(3,2)+2*E1(1,3)*E2(3,3)*E4(3,2)+2*E1(1,1)*E2(1,1)*E4(1,2)+2*E1(2,2)*E2(1,1)*E4(2,1)-2*E1(3,3)*E2(3,3)*E4(1,2)+2*E1(2,2)*E2(1,3)*E4(2,3)-2*E1(1,2)*E2(2,3)*E4(2,3)-2*E1(2,3)*E2(1,2)*E4(2,3)+6*E1(1,2)*E2(1,2)*E4(1,2)+2*E1(1,1)*E2(1,2)*E4(1,1)+2*E1(1,2)*E2(1,3)*E4(1,3)-2*E1(2,1)*E2(1,2)*E4(2,1)+2*E1(1,1)*E2(2,2)*E4(2,1)+2*E1(2,2)*E2(2,3)*E4(1,3)-2*E1(1,2)*E2(2,1)*E4(2,1)-2*E1(1,2)*E2(3,3)*E4(3,3)+2*E1(1,3)*E2(1,3)*E4(1,2)-2*E1(1,2)*E2(3,1)*E4(3,1)+2*E1(1,2)*E2(2,2)*E4(2,2)+2*E1(1,1)*E2(2,1)*E4(2,2)+2*E1(2,1)*E2(1,1)*E4(2,2)+2*E1(2,1)*E2(2,2)*E4(1,1)-2*E1(3,1)*E2(1,2)*E4(3,1)-2*E1(2,3)*E2(2,3)*E4(1,2)-2*E1(3,1)*E2(3,1)*E4(1,2)-2*E1(3,3)*E2(1,2)*E4(3,3)+2*E1(3,2)*E2(3,1)*E4(1,1)+2*E1(3,1)*E2(3,2)*E4(1,1)+2*E1(3,2)*E2(1,1)*E4(3,1)+2*E1(1,1)*E2(3,2)*E4(3,1)+2*E1(2,2)*E2(2,2)*E4(1,2)+2*E1(3,2)*E2(1,2)*E4(3,2)+2*E1(1,2)*E2(3,2)*E4(3,2)+2*E1(2,3)*E2(2,2)*E4(1,3)+2*E1(1,3)*E2(2,2)*E4(2,3)+2*E1(1,2)*E2(1,1)*E4(1,1)+2*E1(3,2)*E2(3,3)*E4(1,3)+2*E1(1,3)*E2(2,3)*E4(2,2)+2*E1(2,3)*E2(1,3)*E4(2,2)+2*E1(1,1)*E2(3,1)*E4(3,2)+2*E1(2,2)*E2(2,1)*E4(1,1)+2*E1(2,2)*E2(1,2)*E4(2,2)+2*E1(1,3)*E2(1,2)*E4(1,3)-2*E1(2,1)*E2(2,1)*E4(1,2)+2*E1(3,3)*E2(3,2)*E4(1,3)+2*E1(3,2)*E2(1,3)*E4(3,3)+2*E1(1,3)*E2(3,2)*E4(3,3)+2*E1(3,2)*E2(3,2)*E4(1,2)+2*E1(3,1)*E2(1,1)*E4(3,2);
Mcoefs(116) = 2*E1(1,2)*E2(1,3)*E4(2,3)+2*E1(2,2)*E2(1,2)*E4(1,2)+2*E1(3,2)*E2(3,2)*E4(2,2)+2*E1(3,2)*E2(2,2)*E4(3,2)-2*E1(3,1)*E2(2,2)*E4(3,1)+2*E1(2,2)*E2(3,2)*E4(3,2)+2*E1(3,1)*E2(2,1)*E4(3,2)-2*E1(1,3)*E2(1,3)*E4(2,2)+2*E1(1,2)*E2(2,3)*E4(1,3)-2*E1(2,2)*E2(1,3)*E4(1,3)-2*E1(2,2)*E2(1,1)*E4(1,1)+2*E1(2,1)*E2(2,1)*E4(2,2)+2*E1(2,3)*E2(2,3)*E4(2,2)+2*E1(1,1)*E2(2,1)*E4(1,2)+2*E1(2,1)*E2(1,1)*E4(1,2)+2*E1(3,2)*E2(3,3)*E4(2,3)-2*E1(1,1)*E2(1,1)*E4(2,2)-2*E1(1,3)*E2(2,2)*E4(1,3)+2*E1(2,2)*E2(2,1)*E4(2,1)+2*E1(2,1)*E2(2,2)*E4(2,1)+2*E1(1,2)*E2(1,2)*E4(2,2)+2*E1(1,3)*E2(1,2)*E4(2,3)+2*E1(1,2)*E2(1,1)*E4(2,1)+2*E1(1,1)*E2(1,2)*E4(2,1)+2*E1(1,2)*E2(2,2)*E4(1,2)+2*E1(3,3)*E2(3,2)*E4(2,3)+2*E1(3,2)*E2(2,3)*E4(3,3)+2*E1(2,3)*E2(3,2)*E4(3,3)+2*E1(3,2)*E2(3,1)*E4(2,1)+2*E1(3,1)*E2(3,2)*E4(2,1)+2*E1(2,3)*E2(1,2)*E4(1,3)+2*E1(2,1)*E2(1,2)*E4(1,1)-2*E1(3,1)*E2(3,1)*E4(2,2)-2*E1(3,3)*E2(3,3)*E4(2,2)+2*E1(1,2)*E2(2,1)*E4(1,1)+2*E1(2,2)*E2(2,3)*E4(2,3)+2*E1(2,3)*E2(2,2)*E4(2,3)-2*E1(1,1)*E2(2,2)*E4(1,1)+6*E1(2,2)*E2(2,2)*E4(2,2)-2*E1(2,2)*E2(3,3)*E4(3,3)-2*E1(3,3)*E2(2,2)*E4(3,3)-2*E1(2,2)*E2(3,1)*E4(3,1)+2*E1(1,3)*E2(2,3)*E4(1,2)+2*E1(2,3)*E2(1,3)*E4(1,2)+2*E1(2,1)*E2(3,1)*E4(3,2)+2*E1(3,3)*E2(2,3)*E4(3,2)+2*E1(2,3)*E2(3,3)*E4(3,2)+2*E1(2,1)*E2(3,2)*E4(3,1)+2*E1(3,2)*E2(2,1)*E4(3,1);
Mcoefs(117) = 2*E1(3,3)*E2(2,3)*E4(2,2)+2*E1(2,2)*E2(2,3)*E4(3,3)+2*E1(2,3)*E2(2,2)*E4(3,3)-2*E1(1,3)*E2(3,2)*E4(1,3)-2*E1(3,2)*E2(1,3)*E4(1,3)-2*E1(2,1)*E2(3,2)*E4(2,1)+2*E1(3,3)*E2(1,2)*E4(1,3)+2*E1(1,2)*E2(3,3)*E4(1,3)+2*E1(1,2)*E2(1,1)*E4(3,1)+2*E1(1,1)*E2(1,2)*E4(3,1)+2*E1(2,2)*E2(2,1)*E4(3,1)+2*E1(3,1)*E2(2,2)*E4(2,1)+2*E1(2,2)*E2(3,1)*E4(2,1)-2*E1(2,3)*E2(3,2)*E4(2,3)+2*E1(3,3)*E2(2,2)*E4(2,3)+2*E1(2,2)*E2(3,3)*E4(2,3)+2*E1(3,1)*E2(3,1)*E4(3,2)+2*E1(3,3)*E2(3,3)*E4(3,2)-2*E1(2,1)*E2(2,1)*E4(3,2)-2*E1(2,3)*E2(2,3)*E4(3,2)-2*E1(1,1)*E2(1,1)*E4(3,2)+2*E1(3,1)*E2(3,2)*E4(3,1)+2*E1(3,2)*E2(3,1)*E4(3,1)+6*E1(3,2)*E2(3,2)*E4(3,2)+2*E1(1,2)*E2(1,3)*E4(3,3)-2*E1(3,2)*E2(2,1)*E4(2,1)+2*E1(3,3)*E2(3,2)*E4(3,3)+2*E1(3,2)*E2(1,2)*E4(1,2)+2*E1(3,1)*E2(1,2)*E4(1,1)+2*E1(2,2)*E2(2,2)*E4(3,2)+2*E1(3,2)*E2(3,3)*E4(3,3)+2*E1(1,2)*E2(3,2)*E4(1,2)+2*E1(1,2)*E2(3,1)*E4(1,1)+2*E1(1,2)*E2(1,2)*E4(3,2)+2*E1(1,1)*E2(3,1)*E4(1,2)+2*E1(3,1)*E2(1,1)*E4(1,2)+2*E1(3,3)*E2(1,3)*E4(1,2)-2*E1(1,3)*E2(1,3)*E4(3,2)+2*E1(1,3)*E2(3,3)*E4(1,2)+2*E1(3,2)*E2(2,2)*E4(2,2)-2*E1(1,1)*E2(3,2)*E4(1,1)-2*E1(3,2)*E2(1,1)*E4(1,1)-2*E1(3,2)*E2(2,3)*E4(2,3)+2*E1(2,3)*E2(3,3)*E4(2,2)+2*E1(1,3)*E2(1,2)*E4(3,3)+2*E1(2,1)*E2(2,2)*E4(3,1)+2*E1(2,1)*E2(3,1)*E4(2,2)+2*E1(3,1)*E2(2,1)*E4(2,2)+2*E1(2,2)*E2(3,2)*E4(2,2);
Mcoefs(118) = 2*E1(1,2)*E2(3,3)*E4(3,2)+2*E1(3,3)*E2(1,2)*E4(3,2)+2*E1(3,3)*E2(3,3)*E4(1,3)-2*E1(3,1)*E2(1,3)*E4(3,1)+2*E1(1,1)*E2(3,3)*E4(3,1)-2*E1(1,3)*E2(2,1)*E4(2,1)+2*E1(3,3)*E2(1,1)*E4(3,1)+2*E1(1,2)*E2(1,2)*E4(1,3)+2*E1(2,3)*E2(1,2)*E4(2,2)+2*E1(2,2)*E2(2,3)*E4(1,2)+2*E1(2,3)*E2(2,2)*E4(1,2)+2*E1(1,3)*E2(2,3)*E4(2,3)+2*E1(2,3)*E2(1,3)*E4(2,3)+2*E1(3,3)*E2(3,1)*E4(1,1)+2*E1(1,1)*E2(1,1)*E4(1,3)-2*E1(3,1)*E2(3,1)*E4(1,3)-2*E1(3,2)*E2(3,2)*E4(1,3)+2*E1(1,3)*E2(1,1)*E4(1,1)+2*E1(1,2)*E2(1,3)*E4(1,2)+2*E1(1,3)*E2(1,2)*E4(1,2)+2*E1(1,1)*E2(3,1)*E4(3,3)+2*E1(3,1)*E2(1,1)*E4(3,3)+2*E1(1,2)*E2(3,2)*E4(3,3)+2*E1(3,2)*E2(1,2)*E4(3,3)+2*E1(1,3)*E2(3,3)*E4(3,3)+2*E1(3,3)*E2(1,3)*E4(3,3)-2*E1(2,1)*E2(1,3)*E4(2,1)+2*E1(1,2)*E2(2,3)*E4(2,2)+2*E1(1,1)*E2(1,3)*E4(1,1)+2*E1(2,3)*E2(2,3)*E4(1,3)-2*E1(2,2)*E2(2,2)*E4(1,3)+2*E1(3,1)*E2(3,3)*E4(1,1)+2*E1(2,1)*E2(1,1)*E4(2,3)+2*E1(1,1)*E2(2,1)*E4(2,3)+2*E1(3,2)*E2(3,3)*E4(1,2)+2*E1(3,3)*E2(3,2)*E4(1,2)-2*E1(2,2)*E2(1,3)*E4(2,2)-2*E1(2,1)*E2(2,1)*E4(1,3)-2*E1(1,3)*E2(3,2)*E4(3,2)-2*E1(1,3)*E2(3,1)*E4(3,1)-2*E1(3,2)*E2(1,3)*E4(3,2)+6*E1(1,3)*E2(1,3)*E4(1,3)+2*E1(1,2)*E2(2,2)*E4(2,3)+2*E1(2,2)*E2(1,2)*E4(2,3)+2*E1(1,1)*E2(2,3)*E4(2,1)+2*E1(2,3)*E2(1,1)*E4(2,1)+2*E1(2,1)*E2(2,3)*E4(1,1)+2*E1(2,3)*E2(2,1)*E4(1,1)-2*E1(1,3)*E2(2,2)*E4(2,2);
Mcoefs(119) = 6*E1(2,3)*E2(2,3)*E4(2,3)+2*E1(2,2)*E2(2,3)*E4(2,2)+2*E1(2,3)*E2(2,2)*E4(2,2)-2*E1(3,1)*E2(3,1)*E4(2,3)+2*E1(2,1)*E2(2,3)*E4(2,1)-2*E1(3,2)*E2(3,2)*E4(2,3)+2*E1(2,3)*E2(2,1)*E4(2,1)+2*E1(2,1)*E2(3,3)*E4(3,1)+2*E1(3,3)*E2(2,1)*E4(3,1)+2*E1(3,1)*E2(3,3)*E4(2,1)+2*E1(1,2)*E2(1,3)*E4(2,2)+2*E1(1,3)*E2(1,2)*E4(2,2)+2*E1(3,3)*E2(3,3)*E4(2,3)+2*E1(2,3)*E2(1,3)*E4(1,3)+2*E1(1,3)*E2(2,3)*E4(1,3)+2*E1(1,3)*E2(1,1)*E4(2,1)+2*E1(1,1)*E2(1,3)*E4(2,1)-2*E1(2,3)*E2(1,1)*E4(1,1)-2*E1(1,2)*E2(2,3)*E4(1,2)-2*E1(2,3)*E2(1,2)*E4(1,2)-2*E1(3,1)*E2(2,3)*E4(3,1)+2*E1(2,3)*E2(3,3)*E4(3,3)+2*E1(2,1)*E2(2,1)*E4(2,3)+2*E1(3,3)*E2(3,1)*E4(2,1)+2*E1(2,1)*E2(3,1)*E4(3,3)+2*E1(3,1)*E2(2,1)*E4(3,3)+2*E1(2,2)*E2(3,2)*E4(3,3)+2*E1(3,2)*E2(2,2)*E4(3,3)+2*E1(2,2)*E2(1,3)*E4(1,2)+2*E1(1,3)*E2(2,2)*E4(1,2)+2*E1(1,3)*E2(1,3)*E4(2,3)-2*E1(1,1)*E2(2,3)*E4(1,1)-2*E1(3,2)*E2(2,3)*E4(3,2)-2*E1(2,3)*E2(3,2)*E4(3,2)+2*E1(3,3)*E2(2,2)*E4(3,2)+2*E1(1,3)*E2(2,1)*E4(1,1)+2*E1(2,2)*E2(3,3)*E4(3,2)+2*E1(1,1)*E2(2,1)*E4(1,3)+2*E1(2,1)*E2(1,1)*E4(1,3)+2*E1(1,2)*E2(2,2)*E4(1,3)+2*E1(2,2)*E2(1,2)*E4(1,3)+2*E1(3,2)*E2(3,3)*E4(2,2)+2*E1(3,3)*E2(2,3)*E4(3,3)+2*E1(2,2)*E2(2,2)*E4(2,3)+2*E1(3,3)*E2(3,2)*E4(2,2)-2*E1(2,3)*E2(3,1)*E4(3,1)-2*E1(1,1)*E2(1,1)*E4(2,3)-2*E1(1,2)*E2(1,2)*E4(2,3)+2*E1(2,1)*E2(1,3)*E4(1,1);
Mcoefs(120) = 2*E1(1,1)*E2(3,1)*E4(1,3)+2*E1(3,1)*E2(1,1)*E4(1,3)+2*E1(1,2)*E2(3,2)*E4(1,3)+2*E1(3,2)*E2(1,2)*E4(1,3)+2*E1(3,2)*E2(2,3)*E4(2,2)+2*E1(2,3)*E2(2,2)*E4(3,2)-2*E1(2,2)*E2(2,2)*E4(3,3)+2*E1(2,1)*E2(2,3)*E4(3,1)+2*E1(2,3)*E2(2,1)*E4(3,1)+2*E1(3,1)*E2(2,3)*E4(2,1)+2*E1(2,3)*E2(3,1)*E4(2,1)+2*E1(3,1)*E2(3,1)*E4(3,3)+2*E1(3,2)*E2(3,2)*E4(3,3)+2*E1(3,3)*E2(3,1)*E4(3,1)+2*E1(3,2)*E2(3,3)*E4(3,2)+2*E1(1,1)*E2(1,3)*E4(3,1)+2*E1(1,3)*E2(1,3)*E4(3,3)-2*E1(1,1)*E2(3,3)*E4(1,1)-2*E1(1,1)*E2(1,1)*E4(3,3)+2*E1(3,2)*E2(2,2)*E4(2,3)+2*E1(2,3)*E2(3,2)*E4(2,2)+2*E1(2,3)*E2(2,3)*E4(3,3)+2*E1(2,1)*E2(3,1)*E4(2,3)+2*E1(3,1)*E2(2,1)*E4(2,3)+2*E1(2,2)*E2(2,3)*E4(3,2)-2*E1(3,3)*E2(2,1)*E4(2,1)-2*E1(2,2)*E2(3,3)*E4(2,2)-2*E1(3,3)*E2(2,2)*E4(2,2)+2*E1(3,1)*E2(3,3)*E4(3,1)-2*E1(3,3)*E2(1,1)*E4(1,1)+2*E1(2,2)*E2(3,2)*E4(2,3)+2*E1(2,3)*E2(3,3)*E4(2,3)+2*E1(3,3)*E2(2,3)*E4(2,3)+2*E1(1,3)*E2(3,1)*E4(1,1)+2*E1(3,1)*E2(1,3)*E4(1,1)+2*E1(1,2)*E2(1,3)*E4(3,2)+2*E1(1,3)*E2(1,2)*E4(3,2)+2*E1(3,2)*E2(1,3)*E4(1,2)-2*E1(2,1)*E2(2,1)*E4(3,3)-2*E1(1,2)*E2(1,2)*E4(3,3)-2*E1(1,2)*E2(3,3)*E4(1,2)-2*E1(3,3)*E2(1,2)*E4(1,2)-2*E1(2,1)*E2(3,3)*E4(2,1)+2*E1(3,3)*E2(1,3)*E4(1,3)+2*E1(3,3)*E2(3,2)*E4(3,2)+6*E1(3,3)*E2(3,3)*E4(3,3)+2*E1(1,3)*E2(3,2)*E4(1,2)+2*E1(1,3)*E2(1,1)*E4(3,1)+2*E1(1,3)*E2(3,3)*E4(1,3);
Mcoefs(121) = -E2(3,1)*E2(1,3)*E4(2,2)-E2(3,1)*E2(2,2)*E4(1,3)-E2(2,2)*E2(1,3)*E4(3,1)-E2(1,2)*E2(3,3)*E4(2,1)+E2(3,1)*E2(2,3)*E4(1,2)+E2(3,1)*E2(1,2)*E4(2,3)+E2(3,2)*E2(1,3)*E4(2,1)+E2(2,2)*E2(3,3)*E4(1,1)+E2(2,1)*E2(3,2)*E4(1,3)+E2(2,1)*E2(1,3)*E4(3,2)+E2(1,2)*E2(2,3)*E4(3,1)-E2(1,1)*E2(2,3)*E4(3,2)-E2(1,1)*E2(3,2)*E4(2,3)-E2(3,2)*E2(2,3)*E4(1,1)-E2(2,1)*E2(1,2)*E4(3,3)-E2(2,1)*E2(3,3)*E4(1,2)+E2(1,1)*E2(3,3)*E4(2,2)+E2(1,1)*E2(2,2)*E4(3,3);
Mcoefs(122) = -E2(3,2)^2*E4(1,1)+2*E2(2,1)*E2(2,3)*E4(1,3)+2*E2(2,1)*E2(1,3)*E4(2,3)+2*E2(1,2)*E2(2,2)*E4(2,1)+2*E2(2,1)*E2(2,2)*E4(1,2)+2*E2(1,3)*E2(2,3)*E4(2,1)-2*E2(1,1)*E2(2,3)*E4(2,3)+2*E2(1,1)*E2(1,2)*E4(1,2)+2*E2(1,1)*E2(2,1)*E4(2,1)+2*E2(1,3)*E2(3,3)*E4(3,1)+2*E2(1,2)*E2(3,2)*E4(3,1)+2*E2(1,1)*E2(3,1)*E4(3,1)-2*E2(1,1)*E2(3,3)*E4(3,3)-2*E2(1,1)*E2(3,2)*E4(3,2)-2*E2(1,1)*E2(2,2)*E4(2,2)+2*E2(3,1)*E2(1,2)*E4(3,2)+2*E2(3,1)*E2(3,2)*E4(1,2)+2*E2(3,1)*E2(3,3)*E4(1,3)+2*E2(3,1)*E2(1,3)*E4(3,3)+2*E2(2,1)*E2(1,2)*E4(2,2)+2*E2(1,1)*E2(1,3)*E4(1,3)-E2(3,3)^2*E4(1,1)-E2(2,2)^2*E4(1,1)+E2(3,1)^2*E4(1,1)+E2(2,1)^2*E4(1,1)+E2(1,2)^2*E4(1,1)-E2(2,3)^2*E4(1,1)+E2(1,3)^2*E4(1,1)+3*E2(1,1)^2*E4(1,1);
Mcoefs(123) = E2(3,1)^2*E4(2,1)+E2(2,2)^2*E4(2,1)+E2(2,3)^2*E4(2,1)+E2(1,1)^2*E4(2,1)+3*E2(2,1)^2*E4(2,1)-E2(3,2)^2*E4(2,1)-E2(1,2)^2*E4(2,1)-E2(3,3)^2*E4(2,1)-E2(1,3)^2*E4(2,1)+2*E2(1,1)*E2(1,3)*E4(2,3)-2*E2(2,1)*E2(1,2)*E4(1,2)+2*E2(1,1)*E2(1,2)*E4(2,2)+2*E2(1,1)*E2(2,1)*E4(1,1)+2*E2(1,3)*E2(2,3)*E4(1,1)+2*E2(1,2)*E2(2,2)*E4(1,1)+2*E2(2,1)*E2(2,2)*E4(2,2)+2*E2(2,1)*E2(2,3)*E4(2,3)+2*E2(1,1)*E2(2,3)*E4(1,3)-2*E2(2,1)*E2(3,2)*E4(3,2)-2*E2(2,1)*E2(1,3)*E4(1,3)+2*E2(1,1)*E2(2,2)*E4(1,2)-2*E2(2,1)*E2(3,3)*E4(3,3)+2*E2(3,1)*E2(3,3)*E4(2,3)+2*E2(3,1)*E2(2,3)*E4(3,3)+2*E2(2,3)*E2(3,3)*E4(3,1)+2*E2(3,1)*E2(3,2)*E4(2,2)+2*E2(3,2)*E2(2,2)*E4(3,1)+2*E2(3,1)*E2(2,2)*E4(3,2)+2*E2(2,1)*E2(3,1)*E4(3,1);
Mcoefs(124) = 3*E2(3,1)^2*E4(3,1)+E2(3,3)^2*E4(3,1)+2*E2(2,1)*E2(3,2)*E4(2,2)+2*E2(2,1)*E2(2,3)*E4(3,3)-2*E2(3,1)*E2(2,2)*E4(2,2)+2*E2(1,1)*E2(3,1)*E4(1,1)+2*E2(2,1)*E2(3,3)*E4(2,3)+2*E2(3,1)*E2(3,2)*E4(3,2)+2*E2(2,1)*E2(2,2)*E4(3,2)-2*E2(3,1)*E2(1,2)*E4(1,2)+2*E2(1,1)*E2(1,2)*E4(3,2)-2*E2(3,1)*E2(2,3)*E4(2,3)+2*E2(3,1)*E2(3,3)*E4(3,3)+E2(3,2)^2*E4(3,1)+E2(2,1)^2*E4(3,1)-E2(1,2)^2*E4(3,1)-E2(2,2)^2*E4(3,1)-E2(2,3)^2*E4(3,1)-E2(1,3)^2*E4(3,1)+2*E2(1,3)*E2(3,3)*E4(1,1)+2*E2(1,2)*E2(3,2)*E4(1,1)+2*E2(2,3)*E2(3,3)*E4(2,1)+2*E2(3,2)*E2(2,2)*E4(2,1)+2*E2(1,1)*E2(1,3)*E4(3,3)-2*E2(3,1)*E2(1,3)*E4(1,3)+2*E2(2,1)*E2(3,1)*E4(2,1)+2*E2(1,1)*E2(3,3)*E4(1,3)+2*E2(1,1)*E2(3,2)*E4(1,2)+E2(1,1)^2*E4(3,1);
Mcoefs(125) = E2(3,2)^2*E4(1,2)+E2(1,3)^2*E4(1,2)+3*E2(1,2)^2*E4(1,2)-E2(2,3)^2*E4(1,2)-E2(3,1)^2*E4(1,2)-E2(3,3)^2*E4(1,2)-E2(2,1)^2*E4(1,2)+E2(1,1)^2*E4(1,2)+2*E2(2,2)*E2(1,3)*E4(2,3)+2*E2(1,1)*E2(3,2)*E4(3,1)+2*E2(3,1)*E2(3,2)*E4(1,1)+2*E2(1,1)*E2(1,2)*E4(1,1)+2*E2(1,2)*E2(1,3)*E4(1,3)+2*E2(1,1)*E2(2,1)*E4(2,2)+2*E2(1,3)*E2(2,3)*E4(2,2)+2*E2(1,3)*E2(3,3)*E4(3,2)+2*E2(1,1)*E2(2,2)*E4(2,1)+2*E2(2,2)*E2(2,3)*E4(1,3)+2*E2(1,2)*E2(2,2)*E4(2,2)+2*E2(2,1)*E2(2,2)*E4(1,1)-2*E2(1,2)*E2(2,3)*E4(2,3)-2*E2(2,1)*E2(1,2)*E4(2,1)-2*E2(1,2)*E2(3,3)*E4(3,3)+2*E2(3,2)*E2(3,3)*E4(1,3)+2*E2(3,2)*E2(1,3)*E4(3,3)+2*E2(1,2)*E2(3,2)*E4(3,2)+E2(2,2)^2*E4(1,2)-2*E2(3,1)*E2(1,2)*E4(3,1)+2*E2(1,1)*E2(3,1)*E4(3,2);
Mcoefs(126) = -E2(3,1)^2*E4(2,2)+3*E2(2,2)^2*E4(2,2)+E2(2,1)^2*E4(2,2)-E2(1,1)^2*E4(2,2)-2*E2(2,2)*E2(1,3)*E4(1,3)+2*E2(2,1)*E2(3,2)*E4(3,1)+2*E2(3,1)*E2(3,2)*E4(2,1)-2*E2(3,1)*E2(2,2)*E4(3,1)-2*E2(1,1)*E2(2,2)*E4(1,1)+2*E2(3,2)*E2(3,3)*E4(2,3)+2*E2(1,1)*E2(1,2)*E4(2,1)+2*E2(2,1)*E2(1,2)*E4(1,1)-2*E2(2,2)*E2(3,3)*E4(3,3)+2*E2(2,1)*E2(3,1)*E4(3,2)+2*E2(1,1)*E2(2,1)*E4(1,2)+2*E2(2,1)*E2(2,2)*E4(2,1)+2*E2(2,2)*E2(2,3)*E4(2,3)+2*E2(2,3)*E2(3,3)*E4(3,2)+2*E2(1,2)*E2(2,2)*E4(1,2)+2*E2(1,2)*E2(2,3)*E4(1,3)+2*E2(1,2)*E2(1,3)*E4(2,3)+2*E2(3,2)*E2(2,3)*E4(3,3)+2*E2(1,3)*E2(2,3)*E4(1,2)+2*E2(3,2)*E2(2,2)*E4(3,2)+E2(2,3)^2*E4(2,2)+E2(3,2)^2*E4(2,2)-E2(3,3)^2*E4(2,2)+E2(1,2)^2*E4(2,2)-E2(1,3)^2*E4(2,2);
Mcoefs(127) = 2*E2(2,1)*E2(2,2)*E4(3,1)+2*E2(3,1)*E2(2,2)*E4(2,1)+2*E2(3,1)*E2(3,2)*E4(3,1)-2*E2(1,1)*E2(3,2)*E4(1,1)-2*E2(3,2)*E2(2,3)*E4(2,3)+2*E2(1,2)*E2(1,3)*E4(3,3)+2*E2(2,1)*E2(3,1)*E4(2,2)+2*E2(1,3)*E2(3,3)*E4(1,2)+2*E2(1,2)*E2(3,3)*E4(1,3)+2*E2(3,1)*E2(1,2)*E4(1,1)+2*E2(1,1)*E2(1,2)*E4(3,1)+2*E2(1,2)*E2(3,2)*E4(1,2)-2*E2(2,1)*E2(3,2)*E4(2,1)+2*E2(2,2)*E2(3,3)*E4(2,3)+2*E2(3,2)*E2(3,3)*E4(3,3)+2*E2(2,2)*E2(2,3)*E4(3,3)+2*E2(1,1)*E2(3,1)*E4(1,2)-2*E2(3,2)*E2(1,3)*E4(1,3)+2*E2(2,3)*E2(3,3)*E4(2,2)+2*E2(3,2)*E2(2,2)*E4(2,2)+3*E2(3,2)^2*E4(3,2)-E2(2,3)^2*E4(3,2)-E2(1,3)^2*E4(3,2)-E2(2,1)^2*E4(3,2)-E2(1,1)^2*E4(3,2)+E2(2,2)^2*E4(3,2)+E2(3,3)^2*E4(3,2)+E2(3,1)^2*E4(3,2)+E2(1,2)^2*E4(3,2);
Mcoefs(128) = 2*E2(1,1)*E2(2,3)*E4(2,1)+2*E2(3,1)*E2(3,3)*E4(1,1)+2*E2(1,1)*E2(3,3)*E4(3,1)+2*E2(1,1)*E2(1,3)*E4(1,1)+2*E2(1,1)*E2(2,1)*E4(2,3)+2*E2(1,2)*E2(2,2)*E4(2,3)+2*E2(1,1)*E2(3,1)*E4(3,3)+2*E2(1,2)*E2(3,3)*E4(3,2)-2*E2(3,1)*E2(1,3)*E4(3,1)-2*E2(3,2)*E2(1,3)*E4(3,2)+2*E2(1,2)*E2(1,3)*E4(1,2)+2*E2(1,3)*E2(2,3)*E4(2,3)+2*E2(1,3)*E2(3,3)*E4(3,3)+2*E2(2,2)*E2(2,3)*E4(1,2)+2*E2(1,2)*E2(3,2)*E4(3,3)+2*E2(1,2)*E2(2,3)*E4(2,2)+2*E2(2,1)*E2(2,3)*E4(1,1)-2*E2(2,1)*E2(1,3)*E4(2,1)-2*E2(2,2)*E2(1,3)*E4(2,2)+2*E2(3,2)*E2(3,3)*E4(1,2)+E2(1,2)^2*E4(1,3)+E2(2,3)^2*E4(1,3)+3*E2(1,3)^2*E4(1,3)+E2(3,3)^2*E4(1,3)+E2(1,1)^2*E4(1,3)-E2(2,1)^2*E4(1,3)-E2(2,2)^2*E4(1,3)-E2(3,2)^2*E4(1,3)-E2(3,1)^2*E4(1,3);
Mcoefs(129) = 2*E2(3,2)*E2(3,3)*E4(2,2)+2*E2(2,3)*E2(3,3)*E4(3,3)+2*E2(1,2)*E2(2,2)*E4(1,3)+2*E2(2,1)*E2(3,3)*E4(3,1)+2*E2(3,1)*E2(3,3)*E4(2,1)+2*E2(2,1)*E2(3,1)*E4(3,3)-2*E2(1,2)*E2(2,3)*E4(1,2)+2*E2(1,3)*E2(2,3)*E4(1,3)+2*E2(1,1)*E2(2,1)*E4(1,3)+2*E2(2,2)*E2(1,3)*E4(1,2)+2*E2(1,1)*E2(1,3)*E4(2,1)-2*E2(3,2)*E2(2,3)*E4(3,2)+2*E2(2,1)*E2(2,3)*E4(2,1)+2*E2(3,2)*E2(2,2)*E4(3,3)+2*E2(1,2)*E2(1,3)*E4(2,2)+2*E2(2,1)*E2(1,3)*E4(1,1)-2*E2(3,1)*E2(2,3)*E4(3,1)-2*E2(1,1)*E2(2,3)*E4(1,1)+2*E2(2,2)*E2(2,3)*E4(2,2)+2*E2(2,2)*E2(3,3)*E4(3,2)-E2(3,2)^2*E4(2,3)-E2(3,1)^2*E4(2,3)-E2(1,1)^2*E4(2,3)+E2(2,1)^2*E4(2,3)+3*E2(2,3)^2*E4(2,3)+E2(1,3)^2*E4(2,3)+E2(2,2)^2*E4(2,3)+E2(3,3)^2*E4(2,3)-E2(1,2)^2*E4(2,3);
Mcoefs(130) = -E2(1,1)^2*E4(3,3)-E2(2,1)^2*E4(3,3)-E2(1,2)^2*E4(3,3)+3*E2(3,3)^2*E4(3,3)+E2(2,3)^2*E4(3,3)-E2(2,2)^2*E4(3,3)+E2(3,1)^2*E4(3,3)+E2(1,3)^2*E4(3,3)+E2(3,2)^2*E4(3,3)-2*E2(2,2)*E2(3,3)*E4(2,2)+2*E2(3,2)*E2(3,3)*E4(3,2)+2*E2(1,1)*E2(3,1)*E4(1,3)+2*E2(1,3)*E2(3,3)*E4(1,3)+2*E2(2,1)*E2(2,3)*E4(3,1)+2*E2(3,2)*E2(1,3)*E4(1,2)-2*E2(2,1)*E2(3,3)*E4(2,1)+2*E2(3,1)*E2(2,3)*E4(2,1)+2*E2(3,2)*E2(2,2)*E4(2,3)+2*E2(3,1)*E2(1,3)*E4(1,1)+2*E2(1,2)*E2(1,3)*E4(3,2)-2*E2(1,1)*E2(3,3)*E4(1,1)-2*E2(1,2)*E2(3,3)*E4(1,2)+2*E2(3,1)*E2(3,3)*E4(3,1)+2*E2(2,1)*E2(3,1)*E4(2,3)+2*E2(2,3)*E2(3,3)*E4(2,3)+2*E2(2,2)*E2(2,3)*E4(3,2)+2*E2(3,2)*E2(2,3)*E4(2,2)+2*E2(1,1)*E2(1,3)*E4(3,1)+2*E2(1,2)*E2(3,2)*E4(1,3);
Mcoefs(131) = -E1(1,1)*E3(2,3)*E4(3,2)-E1(3,1)*E3(1,3)*E4(2,2)+E1(1,3)*E3(2,1)*E4(3,2)+E1(3,3)*E3(1,1)*E4(2,2)+E1(2,3)*E3(1,2)*E4(3,1)-E1(3,1)*E3(2,2)*E4(1,3)+E1(3,1)*E3(1,2)*E4(2,3)-E1(3,3)*E3(1,2)*E4(2,1)-E1(3,3)*E3(2,1)*E4(1,2)-E1(3,2)*E3(1,1)*E4(2,3)-E1(2,1)*E3(3,3)*E4(1,2)-E1(2,3)*E3(1,1)*E4(3,2)-E1(2,2)*E3(1,3)*E4(3,1)+E1(1,2)*E3(3,1)*E4(2,3)+E1(2,3)*E3(3,1)*E4(1,2)-E1(1,2)*E3(2,1)*E4(3,3)-E1(1,3)*E3(3,1)*E4(2,2)+E1(2,1)*E3(3,2)*E4(1,3)+E1(1,2)*E3(2,3)*E4(3,1)+E1(2,1)*E3(1,3)*E4(3,2)-E1(2,1)*E3(1,2)*E4(3,3)-E1(2,2)*E3(3,1)*E4(1,3)+E1(3,3)*E3(2,2)*E4(1,1)+E1(2,2)*E3(3,3)*E4(1,1)+E1(1,1)*E3(3,3)*E4(2,2)+E1(2,2)*E3(1,1)*E4(3,3)+E1(3,2)*E3(2,1)*E4(1,3)+E1(1,3)*E3(3,2)*E4(2,1)-E1(2,3)*E3(3,2)*E4(1,1)+E1(3,2)*E3(1,3)*E4(2,1)-E1(3,2)*E3(2,3)*E4(1,1)+E1(1,1)*E3(2,2)*E4(3,3)-E1(1,2)*E3(3,3)*E4(2,1)-E1(1,3)*E3(2,2)*E4(3,1)-E1(1,1)*E3(3,2)*E4(2,3)+E1(3,1)*E3(2,3)*E4(1,2);
Mcoefs(132) = 2*E1(1,2)*E3(1,2)*E4(1,1)+2*E1(2,1)*E3(2,3)*E4(1,3)+2*E1(1,2)*E3(3,2)*E4(3,1)+2*E1(3,1)*E3(1,1)*E4(3,1)+2*E1(1,2)*E3(2,2)*E4(2,1)+2*E1(1,1)*E3(1,3)*E4(1,3)+2*E1(1,2)*E3(1,1)*E4(1,2)+2*E1(2,1)*E3(1,3)*E4(2,3)+6*E1(1,1)*E3(1,1)*E4(1,1)+2*E1(1,3)*E3(1,3)*E4(1,1)-2*E1(2,2)*E3(1,1)*E4(2,2)-2*E1(1,1)*E3(2,3)*E4(2,3)-2*E1(1,1)*E3(2,2)*E4(2,2)+2*E1(2,3)*E3(1,3)*E4(2,1)+2*E1(1,1)*E3(3,1)*E4(3,1)+2*E1(1,3)*E3(3,3)*E4(3,1)+2*E1(1,3)*E3(1,1)*E4(1,3)-2*E1(1,1)*E3(3,3)*E4(3,3)-2*E1(1,1)*E3(3,2)*E4(3,2)+2*E1(3,2)*E3(3,1)*E4(1,2)+2*E1(2,1)*E3(2,1)*E4(1,1)+2*E1(1,3)*E3(2,1)*E4(2,3)+2*E1(2,2)*E3(1,2)*E4(2,1)+2*E1(3,3)*E3(3,1)*E4(1,3)+2*E1(3,1)*E3(3,3)*E4(1,3)-2*E1(3,2)*E3(1,1)*E4(3,2)+2*E1(3,1)*E3(3,2)*E4(1,2)+2*E1(2,1)*E3(2,2)*E4(1,2)+2*E1(2,2)*E3(2,1)*E4(1,2)-2*E1(2,2)*E3(2,2)*E4(1,1)+2*E1(1,3)*E3(2,3)*E4(2,1)+2*E1(3,2)*E3(1,2)*E4(3,1)+2*E1(2,1)*E3(1,1)*E4(2,1)-2*E1(3,3)*E3(1,1)*E4(3,3)+2*E1(2,3)*E3(2,1)*E4(1,3)+2*E1(3,1)*E3(1,3)*E4(3,3)+2*E1(1,3)*E3(3,1)*E4(3,3)+2*E1(1,1)*E3(1,2)*E4(1,2)+2*E1(2,1)*E3(1,2)*E4(2,2)+2*E1(1,2)*E3(2,1)*E4(2,2)-2*E1(3,2)*E3(3,2)*E4(1,1)-2*E1(2,3)*E3(2,3)*E4(1,1)-2*E1(3,3)*E3(3,3)*E4(1,1)+2*E1(1,1)*E3(2,1)*E4(2,1)-2*E1(2,3)*E3(1,1)*E4(2,3)+2*E1(3,3)*E3(1,3)*E4(3,1)+2*E1(3,1)*E3(1,2)*E4(3,2)+2*E1(1,2)*E3(3,1)*E4(3,2)+2*E1(3,1)*E3(3,1)*E4(1,1);
Mcoefs(133) = 2*E1(2,3)*E3(1,3)*E4(1,1)-2*E1(1,3)*E3(2,1)*E4(1,3)+2*E1(2,3)*E3(1,1)*E4(1,3)+2*E1(3,3)*E3(3,1)*E4(2,3)-2*E1(2,1)*E3(3,3)*E4(3,3)+2*E1(3,1)*E3(3,3)*E4(2,3)-2*E1(2,1)*E3(1,2)*E4(1,2)-2*E1(2,1)*E3(3,2)*E4(3,2)+2*E1(3,1)*E3(3,2)*E4(2,2)+2*E1(1,1)*E3(1,2)*E4(2,2)+2*E1(2,2)*E3(3,2)*E4(3,1)+2*E1(1,3)*E3(2,3)*E4(1,1)+2*E1(2,2)*E3(1,2)*E4(1,1)+2*E1(3,1)*E3(2,2)*E4(3,2)+2*E1(2,2)*E3(1,1)*E4(1,2)+2*E1(3,2)*E3(3,1)*E4(2,2)-2*E1(3,3)*E3(3,3)*E4(2,1)+2*E1(3,1)*E3(2,1)*E4(3,1)+2*E1(1,1)*E3(1,1)*E4(2,1)+2*E1(3,1)*E3(3,1)*E4(2,1)+2*E1(2,1)*E3(1,1)*E4(1,1)-2*E1(1,2)*E3(2,1)*E4(1,2)+6*E1(2,1)*E3(2,1)*E4(2,1)+2*E1(2,1)*E3(2,3)*E4(2,3)+2*E1(2,2)*E3(2,1)*E4(2,2)+2*E1(2,2)*E3(2,2)*E4(2,1)+2*E1(3,3)*E3(2,3)*E4(3,1)+2*E1(1,1)*E3(2,1)*E4(1,1)+2*E1(2,1)*E3(3,1)*E4(3,1)+2*E1(3,1)*E3(2,3)*E4(3,3)+2*E1(1,2)*E3(2,2)*E4(1,1)+2*E1(2,2)*E3(3,1)*E4(3,2)+2*E1(3,2)*E3(2,2)*E4(3,1)+2*E1(1,2)*E3(1,1)*E4(2,2)+2*E1(2,3)*E3(3,3)*E4(3,1)+2*E1(2,3)*E3(2,3)*E4(2,1)+2*E1(1,1)*E3(1,3)*E4(2,3)+2*E1(2,1)*E3(2,2)*E4(2,2)+2*E1(1,3)*E3(1,1)*E4(2,3)-2*E1(1,3)*E3(1,3)*E4(2,1)+2*E1(2,3)*E3(2,1)*E4(2,3)+2*E1(2,3)*E3(3,1)*E4(3,3)+2*E1(1,1)*E3(2,2)*E4(1,2)-2*E1(3,2)*E3(2,1)*E4(3,2)-2*E1(1,2)*E3(1,2)*E4(2,1)+2*E1(1,1)*E3(2,3)*E4(1,3)-2*E1(2,1)*E3(1,3)*E4(1,3)-2*E1(3,2)*E3(3,2)*E4(2,1)-2*E1(3,3)*E3(2,1)*E4(3,3);
Mcoefs(134) = 2*E1(2,1)*E3(3,3)*E4(2,3)+2*E1(2,1)*E3(3,2)*E4(2,2)+2*E1(2,2)*E3(3,2)*E4(2,1)+2*E1(2,3)*E3(2,1)*E4(3,3)-2*E1(3,1)*E3(1,2)*E4(1,2)+2*E1(3,2)*E3(2,2)*E4(2,1)+2*E1(2,1)*E3(3,1)*E4(2,1)+2*E1(1,3)*E3(1,1)*E4(3,3)+2*E1(1,1)*E3(3,2)*E4(1,2)-2*E1(3,1)*E3(2,2)*E4(2,2)+2*E1(1,3)*E3(3,3)*E4(1,1)-2*E1(1,3)*E3(1,3)*E4(3,1)-2*E1(2,3)*E3(3,1)*E4(2,3)-2*E1(2,2)*E3(2,2)*E4(3,1)-2*E1(2,3)*E3(2,3)*E4(3,1)+2*E1(3,3)*E3(2,1)*E4(2,3)+2*E1(3,2)*E3(3,1)*E4(3,2)-2*E1(1,2)*E3(1,2)*E4(3,1)+2*E1(3,1)*E3(3,2)*E4(3,2)+2*E1(3,1)*E3(3,3)*E4(3,3)+2*E1(1,1)*E3(1,2)*E4(3,2)+2*E1(3,2)*E3(1,1)*E4(1,2)+2*E1(3,2)*E3(2,1)*E4(2,2)+2*E1(3,1)*E3(2,1)*E4(2,1)+2*E1(1,2)*E3(3,2)*E4(1,1)-2*E1(2,2)*E3(3,1)*E4(2,2)+2*E1(1,1)*E3(1,1)*E4(3,1)+2*E1(3,3)*E3(1,3)*E4(1,1)+2*E1(1,1)*E3(1,3)*E4(3,3)+2*E1(3,3)*E3(3,1)*E4(3,3)+2*E1(2,2)*E3(2,1)*E4(3,2)+6*E1(3,1)*E3(3,1)*E4(3,1)+2*E1(1,1)*E3(3,3)*E4(1,3)+2*E1(1,2)*E3(1,1)*E4(3,2)+2*E1(2,1)*E3(2,1)*E4(3,1)-2*E1(1,2)*E3(3,1)*E4(1,2)-2*E1(3,1)*E3(1,3)*E4(1,3)+2*E1(2,1)*E3(2,2)*E4(3,2)-2*E1(3,1)*E3(2,3)*E4(2,3)+2*E1(3,3)*E3(1,1)*E4(1,3)+2*E1(3,1)*E3(1,1)*E4(1,1)+2*E1(3,2)*E3(1,2)*E4(1,1)+2*E1(1,1)*E3(3,1)*E4(1,1)-2*E1(1,3)*E3(3,1)*E4(1,3)+2*E1(3,2)*E3(3,2)*E4(3,1)+2*E1(2,1)*E3(2,3)*E4(3,3)+2*E1(3,3)*E3(2,3)*E4(2,1)+2*E1(2,3)*E3(3,3)*E4(2,1)+2*E1(3,3)*E3(3,3)*E4(3,1);
Mcoefs(135) = 2*E1(1,1)*E3(2,1)*E4(2,2)+2*E1(2,1)*E3(1,1)*E4(2,2)+2*E1(2,2)*E3(2,1)*E4(1,1)+2*E1(1,3)*E3(3,3)*E4(3,2)+2*E1(3,3)*E3(1,3)*E4(3,2)+2*E1(1,1)*E3(1,1)*E4(1,2)+2*E1(3,3)*E3(3,2)*E4(1,3)+2*E1(3,2)*E3(1,3)*E4(3,3)-2*E1(2,1)*E3(2,1)*E4(1,2)+2*E1(2,1)*E3(2,2)*E4(1,1)+2*E1(1,3)*E3(2,2)*E4(2,3)+2*E1(1,3)*E3(1,2)*E4(1,3)-2*E1(2,1)*E3(1,2)*E4(2,1)+2*E1(2,2)*E3(1,2)*E4(2,2)+2*E1(1,3)*E3(3,2)*E4(3,3)+2*E1(3,2)*E3(3,2)*E4(1,2)+2*E1(2,3)*E3(2,2)*E4(1,3)+2*E1(3,2)*E3(1,2)*E4(3,2)+2*E1(1,2)*E3(3,2)*E4(3,2)+2*E1(3,1)*E3(1,1)*E4(3,2)+2*E1(1,1)*E3(3,1)*E4(3,2)-2*E1(1,2)*E3(2,1)*E4(2,1)-2*E1(1,2)*E3(3,3)*E4(3,3)+2*E1(3,2)*E3(3,1)*E4(1,1)+2*E1(2,2)*E3(1,1)*E4(2,1)-2*E1(2,3)*E3(2,3)*E4(1,2)+2*E1(1,2)*E3(1,3)*E4(1,3)+2*E1(2,2)*E3(2,2)*E4(1,2)+6*E1(1,2)*E3(1,2)*E4(1,2)+2*E1(1,3)*E3(1,3)*E4(1,2)+2*E1(1,2)*E3(2,2)*E4(2,2)+2*E1(1,1)*E3(2,2)*E4(2,1)+2*E1(1,2)*E3(1,1)*E4(1,1)+2*E1(2,2)*E3(2,3)*E4(1,3)-2*E1(3,3)*E3(1,2)*E4(3,3)+2*E1(3,1)*E3(3,2)*E4(1,1)+2*E1(3,2)*E3(1,1)*E4(3,1)+2*E1(1,1)*E3(3,2)*E4(3,1)-2*E1(2,3)*E3(1,2)*E4(2,3)+2*E1(1,1)*E3(1,2)*E4(1,1)+2*E1(3,2)*E3(3,3)*E4(1,3)+2*E1(1,3)*E3(2,3)*E4(2,2)+2*E1(2,3)*E3(1,3)*E4(2,2)-2*E1(1,2)*E3(3,1)*E4(3,1)-2*E1(1,2)*E3(2,3)*E4(2,3)+2*E1(2,2)*E3(1,3)*E4(2,3)-2*E1(3,1)*E3(3,1)*E4(1,2)-2*E1(3,3)*E3(3,3)*E4(1,2)-2*E1(3,1)*E3(1,2)*E4(3,1);
Mcoefs(136) = 2*E1(1,1)*E3(1,2)*E4(2,1)+2*E1(2,3)*E3(1,2)*E4(1,3)+2*E1(2,1)*E3(1,2)*E4(1,1)+2*E1(1,2)*E3(1,1)*E4(2,1)+2*E1(2,2)*E3(1,2)*E4(1,2)+2*E1(1,2)*E3(2,3)*E4(1,3)+2*E1(1,3)*E3(2,3)*E4(1,2)+2*E1(2,3)*E3(1,3)*E4(1,2)+2*E1(3,3)*E3(2,3)*E4(3,2)+2*E1(2,1)*E3(1,1)*E4(1,2)-2*E1(1,1)*E3(2,2)*E4(1,1)+2*E1(3,2)*E3(3,1)*E4(2,1)+2*E1(3,1)*E3(3,2)*E4(2,1)+2*E1(3,2)*E3(2,1)*E4(3,1)+2*E1(1,2)*E3(2,1)*E4(1,1)-2*E1(2,2)*E3(3,1)*E4(3,1)-2*E1(2,2)*E3(1,3)*E4(1,3)+2*E1(1,2)*E3(2,2)*E4(1,2)+2*E1(2,2)*E3(3,2)*E4(3,2)-2*E1(3,1)*E3(2,2)*E4(3,1)+2*E1(2,1)*E3(2,1)*E4(2,2)+2*E1(2,3)*E3(2,3)*E4(2,2)+2*E1(1,1)*E3(2,1)*E4(1,2)+2*E1(2,1)*E3(3,2)*E4(3,1)+2*E1(3,2)*E3(3,3)*E4(2,3)-2*E1(1,1)*E3(1,1)*E4(2,2)+2*E1(2,3)*E3(2,2)*E4(2,3)+6*E1(2,2)*E3(2,2)*E4(2,2)-2*E1(3,3)*E3(2,2)*E4(3,3)+2*E1(3,3)*E3(3,2)*E4(2,3)+2*E1(3,2)*E3(2,3)*E4(3,3)+2*E1(2,3)*E3(3,2)*E4(3,3)-2*E1(3,1)*E3(3,1)*E4(2,2)-2*E1(3,3)*E3(3,3)*E4(2,2)+2*E1(3,2)*E3(3,2)*E4(2,2)+2*E1(2,3)*E3(3,3)*E4(3,2)+2*E1(2,1)*E3(3,1)*E4(3,2)-2*E1(2,2)*E3(1,1)*E4(1,1)+2*E1(3,2)*E3(2,2)*E4(3,2)+2*E1(3,1)*E3(2,1)*E4(3,2)+2*E1(1,3)*E3(1,2)*E4(2,3)+2*E1(2,2)*E3(2,3)*E4(2,3)-2*E1(2,2)*E3(3,3)*E4(3,3)-2*E1(1,3)*E3(1,3)*E4(2,2)-2*E1(1,3)*E3(2,2)*E4(1,3)+2*E1(2,2)*E3(2,1)*E4(2,1)+2*E1(2,1)*E3(2,2)*E4(2,1)+2*E1(1,2)*E3(1,3)*E4(2,3)+2*E1(1,2)*E3(1,2)*E4(2,2);
Mcoefs(137) = 2*E1(2,1)*E3(3,1)*E4(2,2)+2*E1(3,1)*E3(2,1)*E4(2,2)+2*E1(2,2)*E3(3,2)*E4(2,2)+2*E1(3,3)*E3(1,2)*E4(1,3)-2*E1(1,3)*E3(1,3)*E4(3,2)+2*E1(3,3)*E3(1,3)*E4(1,2)+2*E1(3,2)*E3(1,2)*E4(1,2)+2*E1(3,1)*E3(1,2)*E4(1,1)+2*E1(1,2)*E3(1,1)*E4(3,1)+2*E1(1,2)*E3(3,2)*E4(1,2)+2*E1(2,2)*E3(3,3)*E4(2,3)-2*E1(2,1)*E3(3,2)*E4(2,1)-2*E1(1,1)*E3(3,2)*E4(1,1)-2*E1(3,2)*E3(1,1)*E4(1,1)+2*E1(2,2)*E3(2,2)*E4(3,2)+2*E1(2,3)*E3(3,3)*E4(2,2)+2*E1(3,3)*E3(2,3)*E4(2,2)+2*E1(2,3)*E3(2,2)*E4(3,3)+2*E1(2,2)*E3(2,3)*E4(3,3)-2*E1(1,3)*E3(3,2)*E4(1,3)-2*E1(3,2)*E3(1,3)*E4(1,3)+2*E1(1,1)*E3(1,2)*E4(3,1)+2*E1(3,1)*E3(2,2)*E4(2,1)+2*E1(2,2)*E3(3,1)*E4(2,1)-2*E1(3,2)*E3(2,1)*E4(2,1)-2*E1(2,1)*E3(2,1)*E4(3,2)-2*E1(2,3)*E3(2,3)*E4(3,2)-2*E1(1,1)*E3(1,1)*E4(3,2)+2*E1(3,1)*E3(3,2)*E4(3,1)+2*E1(3,3)*E3(3,2)*E4(3,3)+2*E1(3,2)*E3(3,3)*E4(3,3)+2*E1(1,2)*E3(1,2)*E4(3,2)+2*E1(1,1)*E3(3,1)*E4(1,2)+2*E1(2,2)*E3(2,1)*E4(3,1)+2*E1(3,1)*E3(1,1)*E4(1,2)+2*E1(1,3)*E3(3,3)*E4(1,2)+2*E1(1,2)*E3(3,3)*E4(1,3)-2*E1(2,3)*E3(3,2)*E4(2,3)-2*E1(3,2)*E3(2,3)*E4(2,3)+2*E1(3,2)*E3(3,1)*E4(3,1)+2*E1(2,1)*E3(2,2)*E4(3,1)+6*E1(3,2)*E3(3,2)*E4(3,2)+2*E1(3,2)*E3(2,2)*E4(2,2)+2*E1(1,3)*E3(1,2)*E4(3,3)+2*E1(1,2)*E3(1,3)*E4(3,3)+2*E1(3,3)*E3(3,3)*E4(3,2)+2*E1(3,3)*E3(2,2)*E4(2,3)+2*E1(3,1)*E3(3,1)*E4(3,2)+2*E1(1,2)*E3(3,1)*E4(1,1);
% --- Auto-generated coefficient assignments (middle of a longer sequence; the
% enclosing function starts before and continues after this chunk).
% Each Mcoefs(k) is a degree-3 multivariate polynomial in the entries of the
% 3x3 matrices E1..E4. The expressions appear to be emitted by a symbolic /
% computer-algebra code generator (TODO confirm which tool) -- do NOT hand-edit
% individual terms; regenerate from the source expressions instead.
%
% Observable grouping in this chunk:
%   138-140 : trilinear products E1(i,j)*E3(k,l)*E4(m,n)
%   141-150 : trilinear products E2(i,j)*E3(k,l)*E4(m,n)
%   151-160 : quadratic-in-E3 products E3*E3*E4 (note the scalar ^2 squares)
%   161-170 : products E1 * (quadratic in E4)
%   171-180 : products E2 * (quadratic in E4)
%   181-... : products E3 * (quadratic in E4), continuing past this chunk

% Trilinear E1*E3*E4 terms (coefficients 138-140).
Mcoefs(138) = -2*E1(3,1)*E3(1,3)*E4(3,1)-2*E1(3,1)*E3(3,1)*E4(1,3)-2*E1(1,3)*E3(2,1)*E4(2,1)-2*E1(1,3)*E3(2,2)*E4(2,2)+2*E1(2,1)*E3(1,1)*E4(2,3)+2*E1(1,2)*E3(2,2)*E4(2,3)+2*E1(2,2)*E3(1,2)*E4(2,3)+2*E1(3,1)*E3(1,1)*E4(3,3)-2*E1(2,2)*E3(2,2)*E4(1,3)+2*E1(3,1)*E3(3,3)*E4(1,1)+2*E1(1,1)*E3(2,1)*E4(2,3)+2*E1(3,2)*E3(3,3)*E4(1,2)+2*E1(3,3)*E3(3,2)*E4(1,2)+2*E1(1,3)*E3(3,3)*E4(3,3)+2*E1(2,3)*E3(1,2)*E4(2,2)+2*E1(2,2)*E3(2,3)*E4(1,2)+2*E1(2,3)*E3(2,2)*E4(1,2)+2*E1(1,3)*E3(2,3)*E4(2,3)-2*E1(2,1)*E3(1,3)*E4(2,1)+2*E1(1,1)*E3(3,3)*E4(3,1)+2*E1(3,3)*E3(1,1)*E4(3,1)+2*E1(1,1)*E3(1,1)*E4(1,3)+2*E1(1,2)*E3(1,2)*E4(1,3)+2*E1(2,3)*E3(1,3)*E4(2,3)+2*E1(1,3)*E3(1,1)*E4(1,1)+2*E1(1,2)*E3(1,3)*E4(1,2)-2*E1(3,2)*E3(3,2)*E4(1,3)+2*E1(3,3)*E3(3,1)*E4(1,1)+2*E1(1,2)*E3(3,3)*E4(3,2)+2*E1(3,3)*E3(1,2)*E4(3,2)+2*E1(3,3)*E3(3,3)*E4(1,3)+2*E1(1,1)*E3(3,1)*E4(3,3)+2*E1(1,2)*E3(3,2)*E4(3,3)+2*E1(3,2)*E3(1,2)*E4(3,3)+2*E1(3,3)*E3(1,3)*E4(3,3)-2*E1(2,1)*E3(2,1)*E4(1,3)-2*E1(2,2)*E3(1,3)*E4(2,2)-2*E1(1,3)*E3(3,1)*E4(3,1)-2*E1(3,2)*E3(1,3)*E4(3,2)-2*E1(1,3)*E3(3,2)*E4(3,2)+2*E1(1,1)*E3(2,3)*E4(2,1)+2*E1(2,3)*E3(1,1)*E4(2,1)+2*E1(2,3)*E3(2,1)*E4(1,1)+2*E1(1,2)*E3(2,3)*E4(2,2)+2*E1(2,1)*E3(2,3)*E4(1,1)+6*E1(1,3)*E3(1,3)*E4(1,3)+2*E1(1,1)*E3(1,3)*E4(1,1)+2*E1(1,3)*E3(1,2)*E4(1,2)+2*E1(2,3)*E3(2,3)*E4(1,3);
Mcoefs(139) = -2*E1(1,1)*E3(1,1)*E4(2,3)-2*E1(1,2)*E3(1,2)*E4(2,3)+2*E1(2,1)*E3(1,3)*E4(1,1)+2*E1(1,3)*E3(1,1)*E4(2,1)+2*E1(2,2)*E3(2,3)*E4(2,2)+2*E1(2,3)*E3(2,2)*E4(2,2)-2*E1(3,2)*E3(3,2)*E4(2,3)+2*E1(2,2)*E3(1,3)*E4(1,2)-2*E1(3,1)*E3(3,1)*E4(2,3)+2*E1(2,2)*E3(1,2)*E4(1,3)+2*E1(3,2)*E3(3,3)*E4(2,2)+2*E1(3,3)*E3(3,2)*E4(2,2)+2*E1(2,3)*E3(3,3)*E4(3,3)+2*E1(3,3)*E3(2,3)*E4(3,3)+2*E1(3,3)*E3(2,1)*E4(3,1)+2*E1(3,3)*E3(3,1)*E4(2,1)+2*E1(2,1)*E3(3,1)*E4(3,3)+2*E1(3,1)*E3(2,1)*E4(3,3)+2*E1(2,2)*E3(3,2)*E4(3,3)+2*E1(3,1)*E3(3,3)*E4(2,1)+2*E1(3,3)*E3(3,3)*E4(2,3)+2*E1(2,3)*E3(1,3)*E4(1,3)+2*E1(1,3)*E3(2,3)*E4(1,3)+2*E1(1,1)*E3(1,3)*E4(2,1)-2*E1(1,1)*E3(2,3)*E4(1,1)+2*E1(3,2)*E3(2,2)*E4(3,3)+6*E1(2,3)*E3(2,3)*E4(2,3)+2*E1(2,1)*E3(3,3)*E4(3,1)+2*E1(1,3)*E3(2,2)*E4(1,2)+2*E1(1,3)*E3(1,3)*E4(2,3)-2*E1(3,2)*E3(2,3)*E4(3,2)-2*E1(2,3)*E3(3,2)*E4(3,2)-2*E1(2,3)*E3(1,2)*E4(1,2)-2*E1(1,2)*E3(2,3)*E4(1,2)-2*E1(3,1)*E3(2,3)*E4(3,1)-2*E1(2,3)*E3(3,1)*E4(3,1)+2*E1(1,2)*E3(1,3)*E4(2,2)+2*E1(1,3)*E3(1,2)*E4(2,2)+2*E1(2,2)*E3(3,3)*E4(3,2)+2*E1(3,3)*E3(2,2)*E4(3,2)+2*E1(1,3)*E3(2,1)*E4(1,1)+2*E1(1,1)*E3(2,1)*E4(1,3)+2*E1(2,1)*E3(1,1)*E4(1,3)+2*E1(1,2)*E3(2,2)*E4(1,3)-2*E1(2,3)*E3(1,1)*E4(1,1)+2*E1(2,1)*E3(2,3)*E4(2,1)+2*E1(2,3)*E3(2,1)*E4(2,1)+2*E1(2,1)*E3(2,1)*E4(2,3)+2*E1(2,2)*E3(2,2)*E4(2,3);
Mcoefs(140) = -2*E1(2,1)*E3(2,1)*E4(3,3)+2*E1(3,3)*E3(2,3)*E4(2,3)-2*E1(3,3)*E3(1,1)*E4(1,1)-2*E1(1,2)*E3(3,3)*E4(1,2)+2*E1(2,1)*E3(2,3)*E4(3,1)-2*E1(3,3)*E3(2,2)*E4(2,2)+2*E1(3,1)*E3(3,1)*E4(3,3)+2*E1(3,2)*E3(3,2)*E4(3,3)+2*E1(1,1)*E3(1,3)*E4(3,1)+2*E1(1,3)*E3(1,3)*E4(3,3)+2*E1(3,3)*E3(3,2)*E4(3,2)-2*E1(1,1)*E3(1,1)*E4(3,3)-2*E1(1,2)*E3(1,2)*E4(3,3)+6*E1(3,3)*E3(3,3)*E4(3,3)-2*E1(1,1)*E3(3,3)*E4(1,1)+2*E1(2,3)*E3(2,2)*E4(3,2)+2*E1(3,2)*E3(2,3)*E4(2,2)+2*E1(2,3)*E3(3,2)*E4(2,2)+2*E1(2,3)*E3(2,3)*E4(3,3)+2*E1(3,1)*E3(1,3)*E4(1,1)+2*E1(1,2)*E3(1,3)*E4(3,2)+2*E1(1,3)*E3(1,2)*E4(3,2)+2*E1(3,2)*E3(1,3)*E4(1,2)+2*E1(2,2)*E3(2,3)*E4(3,2)+2*E1(3,2)*E3(2,2)*E4(2,3)+2*E1(2,1)*E3(3,1)*E4(2,3)+2*E1(3,1)*E3(2,1)*E4(2,3)+2*E1(2,2)*E3(3,2)*E4(2,3)+2*E1(2,3)*E3(3,3)*E4(2,3)+2*E1(3,3)*E3(1,3)*E4(1,3)+2*E1(1,3)*E3(3,3)*E4(1,3)+2*E1(1,3)*E3(3,1)*E4(1,1)+2*E1(3,1)*E3(3,3)*E4(3,1)+2*E1(3,3)*E3(3,1)*E4(3,1)+2*E1(3,2)*E3(3,3)*E4(3,2)+2*E1(1,3)*E3(3,2)*E4(1,2)+2*E1(1,3)*E3(1,1)*E4(3,1)+2*E1(1,1)*E3(3,1)*E4(1,3)+2*E1(3,1)*E3(1,1)*E4(1,3)+2*E1(3,2)*E3(1,2)*E4(1,3)+2*E1(1,2)*E3(3,2)*E4(1,3)+2*E1(2,3)*E3(2,1)*E4(3,1)+2*E1(3,1)*E3(2,3)*E4(2,1)+2*E1(2,3)*E3(3,1)*E4(2,1)-2*E1(2,2)*E3(2,2)*E4(3,3)-2*E1(3,3)*E3(1,2)*E4(1,2)-2*E1(2,1)*E3(3,3)*E4(2,1)-2*E1(3,3)*E3(2,1)*E4(2,1)-2*E1(2,2)*E3(3,3)*E4(2,2);
% Trilinear E2*E3*E4 terms (coefficients 141-150).
Mcoefs(141) = E2(1,1)*E3(2,2)*E4(3,3)+E2(2,1)*E3(3,2)*E4(1,3)+E2(3,3)*E3(1,1)*E4(2,2)+E2(1,2)*E3(3,1)*E4(2,3)-E2(1,1)*E3(3,2)*E4(2,3)+E2(2,3)*E3(1,2)*E4(3,1)-E2(1,1)*E3(2,3)*E4(3,2)-E2(2,3)*E3(1,1)*E4(3,2)-E2(3,2)*E3(1,1)*E4(2,3)-E2(2,3)*E3(3,2)*E4(1,1)-E2(3,2)*E3(2,3)*E4(1,1)-E2(2,1)*E3(3,3)*E4(1,2)-E2(1,2)*E3(3,3)*E4(2,1)+E2(2,3)*E3(3,1)*E4(1,2)+E2(1,1)*E3(3,3)*E4(2,2)+E2(3,1)*E3(2,3)*E4(1,2)+E2(3,3)*E3(2,2)*E4(1,1)+E2(2,2)*E3(3,3)*E4(1,1)+E2(1,3)*E3(2,1)*E4(3,2)-E2(3,1)*E3(1,3)*E4(2,2)-E2(3,1)*E3(2,2)*E4(1,3)-E2(1,3)*E3(3,1)*E4(2,2)-E2(2,2)*E3(3,1)*E4(1,3)-E2(1,3)*E3(2,2)*E4(3,1)-E2(2,2)*E3(1,3)*E4(3,1)+E2(2,1)*E3(1,3)*E4(3,2)+E2(3,2)*E3(1,3)*E4(2,1)+E2(1,3)*E3(3,2)*E4(2,1)+E2(3,2)*E3(2,1)*E4(1,3)+E2(1,2)*E3(2,3)*E4(3,1)+E2(3,1)*E3(1,2)*E4(2,3)-E2(2,1)*E3(1,2)*E4(3,3)-E2(1,2)*E3(2,1)*E4(3,3)-E2(3,3)*E3(2,1)*E4(1,2)-E2(3,3)*E3(1,2)*E4(2,1)+E2(2,2)*E3(1,1)*E4(3,3);
Mcoefs(142) = -2*E2(2,2)*E3(2,2)*E4(1,1)+2*E2(2,2)*E3(1,2)*E4(2,1)+2*E2(1,2)*E3(2,2)*E4(2,1)-2*E2(3,2)*E3(3,2)*E4(1,1)+2*E2(1,2)*E3(1,2)*E4(1,1)+2*E2(2,1)*E3(1,1)*E4(2,1)+2*E2(1,1)*E3(2,1)*E4(2,1)+2*E2(1,3)*E3(2,3)*E4(2,1)+2*E2(1,2)*E3(3,1)*E4(3,2)+2*E2(3,1)*E3(3,1)*E4(1,1)-2*E2(1,1)*E3(2,3)*E4(2,3)+2*E2(1,3)*E3(1,1)*E4(1,3)+2*E2(1,1)*E3(1,3)*E4(1,3)+6*E2(1,1)*E3(1,1)*E4(1,1)+2*E2(3,1)*E3(3,3)*E4(1,3)+2*E2(3,2)*E3(1,2)*E4(3,1)+2*E2(3,3)*E3(3,1)*E4(1,3)+2*E2(3,1)*E3(1,3)*E4(3,3)+2*E2(1,3)*E3(3,1)*E4(3,3)+2*E2(3,1)*E3(3,2)*E4(1,2)+2*E2(3,2)*E3(3,1)*E4(1,2)+2*E2(2,1)*E3(2,3)*E4(1,3)+2*E2(2,3)*E3(2,1)*E4(1,3)-2*E2(2,3)*E3(1,1)*E4(2,3)+2*E2(3,1)*E3(1,2)*E4(3,2)+2*E2(1,1)*E3(1,2)*E4(1,2)-2*E2(2,3)*E3(2,3)*E4(1,1)+2*E2(3,3)*E3(1,3)*E4(3,1)+2*E2(1,3)*E3(3,3)*E4(3,1)-2*E2(3,3)*E3(3,3)*E4(1,1)-2*E2(1,1)*E3(3,3)*E4(3,3)-2*E2(3,3)*E3(1,1)*E4(3,3)-2*E2(1,1)*E3(3,2)*E4(3,2)-2*E2(3,2)*E3(1,1)*E4(3,2)-2*E2(2,2)*E3(1,1)*E4(2,2)-2*E2(1,1)*E3(2,2)*E4(2,2)+2*E2(1,2)*E3(3,2)*E4(3,1)+2*E2(3,1)*E3(1,1)*E4(3,1)+2*E2(1,1)*E3(3,1)*E4(3,1)+2*E2(1,3)*E3(1,3)*E4(1,1)+2*E2(2,2)*E3(2,1)*E4(1,2)+2*E2(2,1)*E3(1,2)*E4(2,2)+2*E2(1,2)*E3(1,1)*E4(1,2)+2*E2(1,3)*E3(2,1)*E4(2,3)+2*E2(2,1)*E3(2,2)*E4(1,2)+2*E2(2,3)*E3(1,3)*E4(2,1)+2*E2(1,2)*E3(2,1)*E4(2,2)+2*E2(2,1)*E3(2,1)*E4(1,1)+2*E2(2,1)*E3(1,3)*E4(2,3);
Mcoefs(143) = -2*E2(3,2)*E3(3,2)*E4(2,1)-2*E2(1,3)*E3(1,3)*E4(2,1)+2*E2(3,3)*E3(3,1)*E4(2,3)+2*E2(3,1)*E3(3,3)*E4(2,3)+2*E2(2,3)*E3(3,1)*E4(3,3)+2*E2(1,1)*E3(2,2)*E4(1,2)+2*E2(2,3)*E3(1,3)*E4(1,1)+2*E2(2,3)*E3(1,1)*E4(1,3)+2*E2(1,3)*E3(2,3)*E4(1,1)+2*E2(2,2)*E3(1,2)*E4(1,1)+6*E2(2,1)*E3(2,1)*E4(2,1)+2*E2(1,3)*E3(1,1)*E4(2,3)+2*E2(1,2)*E3(2,2)*E4(1,1)+2*E2(2,2)*E3(2,2)*E4(2,1)+2*E2(2,3)*E3(2,3)*E4(2,1)+2*E2(1,1)*E3(1,3)*E4(2,3)+2*E2(3,1)*E3(2,1)*E4(3,1)+2*E2(1,1)*E3(1,1)*E4(2,1)+2*E2(3,1)*E3(3,1)*E4(2,1)+2*E2(2,2)*E3(2,1)*E4(2,2)-2*E2(3,3)*E3(2,1)*E4(3,3)-2*E2(2,1)*E3(3,3)*E4(3,3)+2*E2(2,1)*E3(2,2)*E4(2,2)+2*E2(2,3)*E3(2,1)*E4(2,3)+2*E2(2,1)*E3(2,3)*E4(2,3)-2*E2(1,2)*E3(2,1)*E4(1,2)+2*E2(3,1)*E3(2,3)*E4(3,3)+2*E2(3,2)*E3(3,1)*E4(2,2)+2*E2(3,1)*E3(3,2)*E4(2,2)+2*E2(2,2)*E3(1,1)*E4(1,2)-2*E2(3,3)*E3(3,3)*E4(2,1)+2*E2(1,2)*E3(1,1)*E4(2,2)+2*E2(1,1)*E3(1,2)*E4(2,2)+2*E2(3,1)*E3(2,2)*E4(3,2)+2*E2(2,2)*E3(3,2)*E4(3,1)+2*E2(3,2)*E3(2,2)*E4(3,1)+2*E2(2,3)*E3(3,3)*E4(3,1)+2*E2(3,3)*E3(2,3)*E4(3,1)+2*E2(2,2)*E3(3,1)*E4(3,2)+2*E2(2,1)*E3(1,1)*E4(1,1)+2*E2(1,1)*E3(2,1)*E4(1,1)+2*E2(2,1)*E3(3,1)*E4(3,1)-2*E2(1,2)*E3(1,2)*E4(2,1)+2*E2(1,1)*E3(2,3)*E4(1,3)-2*E2(2,1)*E3(1,2)*E4(1,2)-2*E2(2,1)*E3(3,2)*E4(3,2)-2*E2(3,2)*E3(2,1)*E4(3,2)-2*E2(1,3)*E3(2,1)*E4(1,3)-2*E2(2,1)*E3(1,3)*E4(1,3);
Mcoefs(144) = 2*E2(3,2)*E3(2,1)*E4(2,2)+2*E2(1,1)*E3(1,1)*E4(3,1)+2*E2(3,2)*E3(3,1)*E4(3,2)+2*E2(3,3)*E3(2,1)*E4(2,3)+2*E2(3,1)*E3(3,3)*E4(3,3)+2*E2(3,1)*E3(3,2)*E4(3,2)+2*E2(3,1)*E3(1,1)*E4(1,1)+2*E2(1,1)*E3(1,2)*E4(3,2)-2*E2(3,1)*E3(1,2)*E4(1,2)+2*E2(2,1)*E3(2,1)*E4(3,1)+6*E2(3,1)*E3(3,1)*E4(3,1)-2*E2(1,2)*E3(3,1)*E4(1,2)+2*E2(2,1)*E3(2,2)*E4(3,2)+2*E2(1,1)*E3(3,1)*E4(1,1)-2*E2(3,1)*E3(2,3)*E4(2,3)-2*E2(2,3)*E3(3,1)*E4(2,3)+2*E2(1,2)*E3(3,2)*E4(1,1)+2*E2(3,2)*E3(1,1)*E4(1,2)-2*E2(1,3)*E3(3,1)*E4(1,3)+2*E2(2,1)*E3(3,1)*E4(2,1)+2*E2(1,1)*E3(3,2)*E4(1,2)+2*E2(3,1)*E3(2,1)*E4(2,1)+2*E2(2,1)*E3(3,3)*E4(2,3)+2*E2(2,1)*E3(3,2)*E4(2,2)+2*E2(3,3)*E3(3,1)*E4(3,3)+2*E2(2,3)*E3(2,1)*E4(3,3)+2*E2(3,2)*E3(2,2)*E4(2,1)+2*E2(1,1)*E3(1,3)*E4(3,3)+2*E2(2,2)*E3(3,2)*E4(2,1)+2*E2(2,2)*E3(2,1)*E4(3,2)-2*E2(3,1)*E3(1,3)*E4(1,3)+2*E2(1,1)*E3(3,3)*E4(1,3)+2*E2(1,3)*E3(1,1)*E4(3,3)+2*E2(3,3)*E3(1,1)*E4(1,3)+2*E2(3,2)*E3(3,2)*E4(3,1)+2*E2(2,1)*E3(2,3)*E4(3,3)+2*E2(3,3)*E3(2,3)*E4(2,1)+2*E2(2,3)*E3(3,3)*E4(2,1)+2*E2(3,3)*E3(3,3)*E4(3,1)+2*E2(3,2)*E3(1,2)*E4(1,1)-2*E2(3,1)*E3(2,2)*E4(2,2)+2*E2(1,3)*E3(3,3)*E4(1,1)-2*E2(2,2)*E3(3,1)*E4(2,2)+2*E2(3,3)*E3(1,3)*E4(1,1)-2*E2(1,3)*E3(1,3)*E4(3,1)-2*E2(1,2)*E3(1,2)*E4(3,1)-2*E2(2,3)*E3(2,3)*E4(3,1)-2*E2(2,2)*E3(2,2)*E4(3,1)+2*E2(1,2)*E3(1,1)*E4(3,2);
Mcoefs(145) = 2*E2(3,2)*E3(3,1)*E4(1,1)+6*E2(1,2)*E3(1,2)*E4(1,2)+2*E2(1,3)*E3(1,3)*E4(1,2)-2*E2(3,3)*E3(1,2)*E4(3,3)+2*E2(3,1)*E3(3,2)*E4(1,1)+2*E2(3,2)*E3(1,1)*E4(3,1)+2*E2(1,1)*E3(3,2)*E4(3,1)+2*E2(1,2)*E3(1,1)*E4(1,1)+2*E2(2,2)*E3(2,2)*E4(1,2)+2*E2(2,2)*E3(1,2)*E4(2,2)-2*E2(1,2)*E3(3,1)*E4(3,1)+2*E2(2,2)*E3(1,3)*E4(2,3)+2*E2(1,3)*E3(2,2)*E4(2,3)+2*E2(2,2)*E3(2,3)*E4(1,3)+2*E2(2,3)*E3(2,2)*E4(1,3)+2*E2(3,2)*E3(1,3)*E4(3,3)+2*E2(1,3)*E3(3,2)*E4(3,3)+2*E2(3,2)*E3(3,2)*E4(1,2)+2*E2(1,1)*E3(1,2)*E4(1,1)-2*E2(3,1)*E3(1,2)*E4(3,1)-2*E2(2,1)*E3(2,1)*E4(1,2)+2*E2(3,3)*E3(3,2)*E4(1,3)+2*E2(1,1)*E3(2,2)*E4(2,1)-2*E2(3,1)*E3(3,1)*E4(1,2)-2*E2(3,3)*E3(3,3)*E4(1,2)-2*E2(2,3)*E3(1,2)*E4(2,3)+2*E2(1,3)*E3(1,2)*E4(1,3)-2*E2(1,2)*E3(2,3)*E4(2,3)-2*E2(2,3)*E3(2,3)*E4(1,2)+2*E2(1,2)*E3(1,3)*E4(1,3)+2*E2(1,2)*E3(2,2)*E4(2,2)+2*E2(2,2)*E3(2,1)*E4(1,1)+2*E2(2,1)*E3(2,2)*E4(1,1)+2*E2(3,3)*E3(1,3)*E4(3,2)+2*E2(1,1)*E3(1,1)*E4(1,2)+2*E2(1,3)*E3(3,3)*E4(3,2)+2*E2(3,2)*E3(3,3)*E4(1,3)+2*E2(1,3)*E3(2,3)*E4(2,2)+2*E2(2,3)*E3(1,3)*E4(2,2)+2*E2(1,1)*E3(2,1)*E4(2,2)+2*E2(2,1)*E3(1,1)*E4(2,2)+2*E2(2,2)*E3(1,1)*E4(2,1)+2*E2(3,2)*E3(1,2)*E4(3,2)+2*E2(1,2)*E3(3,2)*E4(3,2)+2*E2(1,1)*E3(3,1)*E4(3,2)+2*E2(3,1)*E3(1,1)*E4(3,2)-2*E2(2,1)*E3(1,2)*E4(2,1)-2*E2(1,2)*E3(2,1)*E4(2,1)-2*E2(1,2)*E3(3,3)*E4(3,3);
Mcoefs(146) = -2*E2(2,2)*E3(3,3)*E4(3,3)+2*E2(2,3)*E3(2,2)*E4(2,3)+6*E2(2,2)*E3(2,2)*E4(2,2)+2*E2(2,2)*E3(2,3)*E4(2,3)-2*E2(3,3)*E3(2,2)*E4(3,3)-2*E2(2,2)*E3(3,1)*E4(3,1)+2*E2(2,1)*E3(1,1)*E4(1,2)+2*E2(2,2)*E3(2,1)*E4(2,1)+2*E2(2,1)*E3(1,2)*E4(1,1)+2*E2(2,1)*E3(3,2)*E4(3,1)+2*E2(3,2)*E3(3,3)*E4(2,3)+2*E2(3,3)*E3(3,2)*E4(2,3)-2*E2(1,1)*E3(1,1)*E4(2,2)+2*E2(2,3)*E3(3,2)*E4(3,3)+2*E2(3,2)*E3(2,3)*E4(3,3)+2*E2(3,2)*E3(3,1)*E4(2,1)+2*E2(3,1)*E3(3,2)*E4(2,1)+2*E2(1,3)*E3(1,2)*E4(2,3)+2*E2(1,2)*E3(2,1)*E4(1,1)-2*E2(2,2)*E3(1,1)*E4(1,1)+2*E2(3,2)*E3(3,2)*E4(2,2)+2*E2(3,2)*E3(2,2)*E4(3,2)-2*E2(2,2)*E3(1,3)*E4(1,3)-2*E2(1,3)*E3(2,2)*E4(1,3)+2*E2(2,2)*E3(3,2)*E4(3,2)+2*E2(2,1)*E3(2,1)*E4(2,2)+2*E2(2,3)*E3(2,3)*E4(2,2)+2*E2(1,1)*E3(2,1)*E4(1,2)+2*E2(1,2)*E3(2,3)*E4(1,3)+2*E2(1,2)*E3(2,2)*E4(1,2)+2*E2(2,2)*E3(1,2)*E4(1,2)+2*E2(1,3)*E3(2,3)*E4(1,2)+2*E2(3,1)*E3(2,1)*E4(3,2)-2*E2(3,3)*E3(3,3)*E4(2,2)-2*E2(1,3)*E3(1,3)*E4(2,2)-2*E2(3,1)*E3(3,1)*E4(2,2)+2*E2(3,2)*E3(2,1)*E4(3,1)+2*E2(2,1)*E3(2,2)*E4(2,1)+2*E2(1,2)*E3(1,3)*E4(2,3)+2*E2(1,2)*E3(1,2)*E4(2,2)+2*E2(1,1)*E3(1,2)*E4(2,1)+2*E2(2,3)*E3(1,2)*E4(1,3)+2*E2(1,2)*E3(1,1)*E4(2,1)+2*E2(2,3)*E3(1,3)*E4(1,2)+2*E2(2,3)*E3(3,3)*E4(3,2)+2*E2(3,3)*E3(2,3)*E4(3,2)+2*E2(2,1)*E3(3,1)*E4(3,2)-2*E2(3,1)*E3(2,2)*E4(3,1)-2*E2(1,1)*E3(2,2)*E4(1,1);
Mcoefs(147) = 2*E2(1,3)*E3(1,2)*E4(3,3)+2*E2(1,2)*E3(3,1)*E4(1,1)-2*E2(2,1)*E3(3,2)*E4(2,1)+2*E2(3,2)*E3(3,3)*E4(3,3)+2*E2(3,1)*E3(1,2)*E4(1,1)+2*E2(2,2)*E3(2,1)*E4(3,1)+2*E2(1,2)*E3(1,2)*E4(3,2)+2*E2(1,1)*E3(3,1)*E4(1,2)+2*E2(3,1)*E3(1,1)*E4(1,2)+2*E2(1,3)*E3(3,3)*E4(1,2)+2*E2(3,3)*E3(1,3)*E4(1,2)+2*E2(3,2)*E3(1,2)*E4(1,2)+2*E2(1,2)*E3(1,1)*E4(3,1)+2*E2(1,2)*E3(3,2)*E4(1,2)+2*E2(3,3)*E3(2,2)*E4(2,3)+2*E2(2,2)*E3(3,3)*E4(2,3)+2*E2(1,1)*E3(1,2)*E4(3,1)+2*E2(2,1)*E3(2,2)*E4(3,1)+2*E2(3,1)*E3(2,1)*E4(2,2)-2*E2(1,1)*E3(3,2)*E4(1,1)-2*E2(3,2)*E3(1,1)*E4(1,1)-2*E2(2,3)*E3(3,2)*E4(2,3)-2*E2(3,2)*E3(2,3)*E4(2,3)+2*E2(3,3)*E3(2,3)*E4(2,2)+2*E2(2,3)*E3(2,2)*E4(3,3)+2*E2(2,2)*E3(2,3)*E4(3,3)-2*E2(1,3)*E3(3,2)*E4(1,3)-2*E2(3,2)*E3(1,3)*E4(1,3)+2*E2(1,2)*E3(3,3)*E4(1,3)+2*E2(2,2)*E3(3,2)*E4(2,2)+2*E2(3,3)*E3(1,2)*E4(1,3)+2*E2(3,2)*E3(2,2)*E4(2,2)+2*E2(1,2)*E3(1,3)*E4(3,3)+2*E2(3,3)*E3(3,3)*E4(3,2)+2*E2(2,2)*E3(2,2)*E4(3,2)+2*E2(2,3)*E3(3,3)*E4(2,2)+2*E2(3,1)*E3(3,2)*E4(3,1)+6*E2(3,2)*E3(3,2)*E4(3,2)-2*E2(2,3)*E3(2,3)*E4(3,2)+2*E2(2,1)*E3(3,1)*E4(2,2)+2*E2(3,2)*E3(3,1)*E4(3,1)+2*E2(3,1)*E3(3,1)*E4(3,2)+2*E2(3,1)*E3(2,2)*E4(2,1)+2*E2(2,2)*E3(3,1)*E4(2,1)-2*E2(3,2)*E3(2,1)*E4(2,1)-2*E2(1,3)*E3(1,3)*E4(3,2)-2*E2(2,1)*E3(2,1)*E4(3,2)-2*E2(1,1)*E3(1,1)*E4(3,2)+2*E2(3,3)*E3(3,2)*E4(3,3);
Mcoefs(148) = 2*E2(3,3)*E3(1,1)*E4(3,1)+2*E2(1,1)*E3(1,1)*E4(1,3)+2*E2(1,2)*E3(1,2)*E4(1,3)+2*E2(2,3)*E3(1,3)*E4(2,3)+2*E2(1,3)*E3(1,1)*E4(1,1)+2*E2(1,1)*E3(1,3)*E4(1,1)-2*E2(2,1)*E3(1,3)*E4(2,1)-2*E2(1,3)*E3(2,1)*E4(2,1)+2*E2(1,2)*E3(1,3)*E4(1,2)-2*E2(2,2)*E3(1,3)*E4(2,2)-2*E2(1,3)*E3(2,2)*E4(2,2)+2*E2(1,1)*E3(2,1)*E4(2,3)+2*E2(2,1)*E3(1,1)*E4(2,3)+2*E2(1,2)*E3(2,2)*E4(2,3)+2*E2(2,2)*E3(1,2)*E4(2,3)+2*E2(3,1)*E3(1,1)*E4(3,3)+2*E2(1,2)*E3(2,3)*E4(2,2)+2*E2(2,3)*E3(1,2)*E4(2,2)+2*E2(1,1)*E3(3,1)*E4(3,3)+2*E2(1,2)*E3(3,2)*E4(3,3)+2*E2(3,2)*E3(1,2)*E4(3,3)+2*E2(1,3)*E3(3,3)*E4(3,3)+2*E2(3,3)*E3(1,3)*E4(3,3)-2*E2(2,2)*E3(2,2)*E4(1,3)-2*E2(2,1)*E3(2,1)*E4(1,3)-2*E2(3,1)*E3(3,1)*E4(1,3)-2*E2(1,3)*E3(3,1)*E4(3,1)-2*E2(3,2)*E3(1,3)*E4(3,2)-2*E2(1,3)*E3(3,2)*E4(3,2)+2*E2(1,1)*E3(3,3)*E4(3,1)+2*E2(2,2)*E3(2,3)*E4(1,2)+2*E2(2,3)*E3(2,2)*E4(1,2)+2*E2(1,3)*E3(2,3)*E4(2,3)+2*E2(3,1)*E3(3,3)*E4(1,1)+2*E2(3,3)*E3(3,1)*E4(1,1)+2*E2(1,2)*E3(3,3)*E4(3,2)+2*E2(3,3)*E3(1,2)*E4(3,2)+2*E2(3,3)*E3(3,3)*E4(1,3)-2*E2(3,1)*E3(1,3)*E4(3,1)+2*E2(1,1)*E3(2,3)*E4(2,1)+2*E2(2,3)*E3(1,1)*E4(2,1)+2*E2(2,1)*E3(2,3)*E4(1,1)+2*E2(2,3)*E3(2,1)*E4(1,1)+6*E2(1,3)*E3(1,3)*E4(1,3)+2*E2(1,3)*E3(1,2)*E4(1,2)+2*E2(2,3)*E3(2,3)*E4(1,3)-2*E2(3,2)*E3(3,2)*E4(1,3)+2*E2(3,2)*E3(3,3)*E4(1,2)+2*E2(3,3)*E3(3,2)*E4(1,2);
Mcoefs(149) = -2*E2(2,3)*E3(1,1)*E4(1,1)-2*E2(1,1)*E3(2,3)*E4(1,1)+2*E2(2,2)*E3(1,3)*E4(1,2)+2*E2(1,3)*E3(1,2)*E4(2,2)-2*E2(2,3)*E3(3,1)*E4(3,1)+2*E2(1,2)*E3(1,3)*E4(2,2)+2*E2(3,3)*E3(2,2)*E4(3,2)+2*E2(2,2)*E3(3,3)*E4(3,2)-2*E2(2,3)*E3(3,2)*E4(3,2)-2*E2(3,2)*E3(2,3)*E4(3,2)+2*E2(2,1)*E3(1,1)*E4(1,3)+2*E2(3,2)*E3(3,3)*E4(2,2)+2*E2(2,2)*E3(1,2)*E4(1,3)+2*E2(1,2)*E3(2,2)*E4(1,3)+2*E2(3,3)*E3(3,2)*E4(2,2)+2*E2(3,3)*E3(2,3)*E4(3,3)+2*E2(2,3)*E3(3,3)*E4(3,3)+2*E2(1,3)*E3(2,1)*E4(1,1)-2*E2(3,1)*E3(2,3)*E4(3,1)-2*E2(1,2)*E3(2,3)*E4(1,2)+2*E2(1,1)*E3(1,3)*E4(2,1)+2*E2(3,1)*E3(2,1)*E4(3,3)+2*E2(2,1)*E3(3,1)*E4(3,3)+6*E2(2,3)*E3(2,3)*E4(2,3)+2*E2(3,2)*E3(2,2)*E4(3,3)+2*E2(2,2)*E3(3,2)*E4(3,3)+2*E2(1,3)*E3(1,3)*E4(2,3)+2*E2(3,3)*E3(3,3)*E4(2,3)+2*E2(2,1)*E3(3,3)*E4(3,1)+2*E2(2,3)*E3(1,3)*E4(1,3)+2*E2(3,3)*E3(2,1)*E4(3,1)+2*E2(3,1)*E3(3,3)*E4(2,1)+2*E2(1,3)*E3(2,3)*E4(1,3)+2*E2(3,3)*E3(3,1)*E4(2,1)+2*E2(2,2)*E3(2,3)*E4(2,2)+2*E2(2,3)*E3(2,1)*E4(2,1)+2*E2(2,1)*E3(2,3)*E4(2,1)-2*E2(3,2)*E3(3,2)*E4(2,3)-2*E2(3,1)*E3(3,1)*E4(2,3)+2*E2(2,3)*E3(2,2)*E4(2,2)+2*E2(2,2)*E3(2,2)*E4(2,3)+2*E2(2,1)*E3(2,1)*E4(2,3)-2*E2(1,1)*E3(1,1)*E4(2,3)+2*E2(1,3)*E3(2,2)*E4(1,2)+2*E2(1,3)*E3(1,1)*E4(2,1)-2*E2(1,2)*E3(1,2)*E4(2,3)+2*E2(1,1)*E3(2,1)*E4(1,3)+2*E2(2,1)*E3(1,3)*E4(1,1)-2*E2(2,3)*E3(1,2)*E4(1,2);
Mcoefs(150) = -2*E2(3,3)*E3(1,1)*E4(1,1)+2*E2(2,2)*E3(3,2)*E4(2,3)+2*E2(3,3)*E3(2,3)*E4(2,3)+2*E2(1,3)*E3(3,3)*E4(1,3)+2*E2(1,3)*E3(3,1)*E4(1,1)-2*E2(2,2)*E3(2,2)*E4(3,3)+2*E2(3,1)*E3(3,1)*E4(3,3)+2*E2(3,2)*E3(3,2)*E4(3,3)+2*E2(1,3)*E3(1,3)*E4(3,3)+2*E2(3,3)*E3(1,3)*E4(1,3)+2*E2(2,3)*E3(3,1)*E4(2,1)+2*E2(2,1)*E3(2,3)*E4(3,1)+2*E2(2,3)*E3(2,1)*E4(3,1)+2*E2(3,1)*E3(2,3)*E4(2,1)-2*E2(2,1)*E3(2,1)*E4(3,3)-2*E2(1,2)*E3(1,2)*E4(3,3)+6*E2(3,3)*E3(3,3)*E4(3,3)-2*E2(1,1)*E3(1,1)*E4(3,3)+2*E2(3,3)*E3(3,2)*E4(3,2)+2*E2(3,2)*E3(3,3)*E4(3,2)+2*E2(3,1)*E3(3,3)*E4(3,1)+2*E2(3,3)*E3(3,1)*E4(3,1)-2*E2(3,3)*E3(2,1)*E4(2,1)-2*E2(2,2)*E3(3,3)*E4(2,2)-2*E2(3,3)*E3(2,2)*E4(2,2)+2*E2(2,3)*E3(2,3)*E4(3,3)+2*E2(2,1)*E3(3,1)*E4(2,3)+2*E2(3,1)*E3(2,1)*E4(2,3)+2*E2(2,3)*E3(3,2)*E4(2,2)+2*E2(3,2)*E3(1,2)*E4(1,3)+2*E2(2,2)*E3(2,3)*E4(3,2)+2*E2(2,3)*E3(2,2)*E4(3,2)+2*E2(3,2)*E3(2,3)*E4(2,2)+2*E2(2,3)*E3(3,3)*E4(2,3)+2*E2(3,1)*E3(1,1)*E4(1,3)+2*E2(1,2)*E3(3,2)*E4(1,3)+2*E2(1,3)*E3(3,2)*E4(1,2)+2*E2(1,3)*E3(1,1)*E4(3,1)+2*E2(1,1)*E3(1,3)*E4(3,1)+2*E2(1,1)*E3(3,1)*E4(1,3)+2*E2(1,2)*E3(1,3)*E4(3,2)+2*E2(1,3)*E3(1,2)*E4(3,2)+2*E2(3,1)*E3(1,3)*E4(1,1)-2*E2(1,2)*E3(3,3)*E4(1,2)+2*E2(3,2)*E3(2,2)*E4(2,3)-2*E2(3,3)*E3(1,2)*E4(1,2)-2*E2(2,1)*E3(3,3)*E4(2,1)-2*E2(1,1)*E3(3,3)*E4(1,1)+2*E2(3,2)*E3(1,3)*E4(1,2);
% Quadratic-in-E3 terms E3*E3*E4 (coefficients 151-160); the ^2 factors are
% squares of individual scalar matrix entries.
Mcoefs(151) = -E3(2,1)*E3(3,3)*E4(1,2)-E3(2,1)*E3(1,2)*E4(3,3)-E3(1,2)*E3(3,3)*E4(2,1)-E3(3,1)*E3(1,3)*E4(2,2)-E3(3,1)*E3(2,2)*E4(1,3)-E3(1,1)*E3(3,2)*E4(2,3)-E3(1,1)*E3(2,3)*E4(3,2)+E3(1,1)*E3(3,3)*E4(2,2)+E3(1,1)*E3(2,2)*E4(3,3)+E3(2,2)*E3(3,3)*E4(1,1)-E3(3,2)*E3(2,3)*E4(1,1)+E3(2,1)*E3(3,2)*E4(1,3)+E3(2,1)*E3(1,3)*E4(3,2)+E3(1,2)*E3(2,3)*E4(3,1)-E3(2,2)*E3(1,3)*E4(3,1)+E3(3,1)*E3(1,2)*E4(2,3)+E3(3,2)*E3(1,3)*E4(2,1)+E3(3,1)*E3(2,3)*E4(1,2);
Mcoefs(152) = -E3(2,2)^2*E4(1,1)+2*E3(1,1)*E3(2,1)*E4(2,1)-2*E3(1,1)*E3(2,2)*E4(2,2)-2*E3(1,1)*E3(3,3)*E4(3,3)+2*E3(1,1)*E3(3,1)*E4(3,1)+2*E3(1,2)*E3(3,2)*E4(3,1)+2*E3(1,3)*E3(3,3)*E4(3,1)-2*E3(1,1)*E3(3,2)*E4(3,2)+2*E3(3,1)*E3(3,2)*E4(1,2)+2*E3(3,1)*E3(1,2)*E4(3,2)+2*E3(3,1)*E3(3,3)*E4(1,3)+2*E3(3,1)*E3(1,3)*E4(3,3)+2*E3(1,1)*E3(1,2)*E4(1,2)-2*E3(1,1)*E3(2,3)*E4(2,3)+2*E3(1,1)*E3(1,3)*E4(1,3)+2*E3(2,1)*E3(2,3)*E4(1,3)-E3(3,2)^2*E4(1,1)-E3(2,3)^2*E4(1,1)+E3(2,1)^2*E4(1,1)-E3(3,3)^2*E4(1,1)+E3(1,3)^2*E4(1,1)+E3(1,2)^2*E4(1,1)+E3(3,1)^2*E4(1,1)+3*E3(1,1)^2*E4(1,1)+2*E3(1,3)*E3(2,3)*E4(2,1)+2*E3(2,1)*E3(2,2)*E4(1,2)+2*E3(2,1)*E3(1,3)*E4(2,3)+2*E3(1,2)*E3(2,2)*E4(2,1)+2*E3(2,1)*E3(1,2)*E4(2,2);
Mcoefs(153) = -E3(3,2)^2*E4(2,1)-E3(1,2)^2*E4(2,1)-E3(1,3)^2*E4(2,1)-E3(3,3)^2*E4(2,1)+3*E3(2,1)^2*E4(2,1)+E3(2,3)^2*E4(2,1)+E3(2,2)^2*E4(2,1)+E3(3,1)^2*E4(2,1)+E3(1,1)^2*E4(2,1)+2*E3(3,1)*E3(3,2)*E4(2,2)+2*E3(3,1)*E3(2,3)*E4(3,3)+2*E3(3,1)*E3(3,3)*E4(2,3)+2*E3(3,1)*E3(2,2)*E4(3,2)+2*E3(1,3)*E3(2,3)*E4(1,1)+2*E3(1,2)*E3(2,2)*E4(1,1)+2*E3(1,1)*E3(2,1)*E4(1,1)+2*E3(2,1)*E3(2,2)*E4(2,2)+2*E3(2,1)*E3(2,3)*E4(2,3)+2*E3(3,2)*E3(2,2)*E4(3,1)+2*E3(1,1)*E3(1,3)*E4(2,3)+2*E3(1,1)*E3(2,3)*E4(1,3)+2*E3(1,1)*E3(2,2)*E4(1,2)+2*E3(2,1)*E3(3,1)*E4(3,1)-2*E3(2,1)*E3(1,2)*E4(1,2)-2*E3(2,1)*E3(3,2)*E4(3,2)-2*E3(2,1)*E3(1,3)*E4(1,3)+2*E3(1,1)*E3(1,2)*E4(2,2)-2*E3(2,1)*E3(3,3)*E4(3,3)+2*E3(2,3)*E3(3,3)*E4(3,1);
Mcoefs(154) = 2*E3(2,1)*E3(2,3)*E4(3,3)+2*E3(1,1)*E3(1,2)*E4(3,2)+2*E3(3,1)*E3(3,2)*E4(3,2)-2*E3(3,1)*E3(2,2)*E4(2,2)+2*E3(1,1)*E3(3,3)*E4(1,3)+2*E3(1,1)*E3(3,2)*E4(1,2)+2*E3(2,1)*E3(3,3)*E4(2,3)+2*E3(2,1)*E3(3,2)*E4(2,2)+2*E3(1,1)*E3(3,1)*E4(1,1)+2*E3(1,2)*E3(3,2)*E4(1,1)+2*E3(2,3)*E3(3,3)*E4(2,1)-2*E3(3,1)*E3(1,3)*E4(1,3)+2*E3(1,3)*E3(3,3)*E4(1,1)+2*E3(2,1)*E3(3,1)*E4(2,1)+2*E3(3,2)*E3(2,2)*E4(2,1)-2*E3(3,1)*E3(1,2)*E4(1,2)+E3(1,1)^2*E4(3,1)+E3(3,3)^2*E4(3,1)-E3(2,2)^2*E4(3,1)-E3(2,3)^2*E4(3,1)-E3(1,3)^2*E4(3,1)-E3(1,2)^2*E4(3,1)+3*E3(3,1)^2*E4(3,1)+E3(3,2)^2*E4(3,1)+E3(2,1)^2*E4(3,1)+2*E3(3,1)*E3(3,3)*E4(3,3)+2*E3(1,1)*E3(1,3)*E4(3,3)-2*E3(3,1)*E3(2,3)*E4(2,3)+2*E3(2,1)*E3(2,2)*E4(3,2);
Mcoefs(155) = -E3(2,3)^2*E4(1,2)-E3(2,1)^2*E4(1,2)-E3(3,1)^2*E4(1,2)+3*E3(1,2)^2*E4(1,2)+E3(1,3)^2*E4(1,2)+E3(2,2)^2*E4(1,2)+E3(3,2)^2*E4(1,2)-E3(3,3)^2*E4(1,2)+E3(1,1)^2*E4(1,2)+2*E3(2,2)*E3(2,3)*E4(1,3)+2*E3(1,2)*E3(2,2)*E4(2,2)+2*E3(2,1)*E3(2,2)*E4(1,1)+2*E3(1,1)*E3(2,2)*E4(2,1)+2*E3(1,1)*E3(1,2)*E4(1,1)+2*E3(1,1)*E3(2,1)*E4(2,2)+2*E3(1,3)*E3(2,3)*E4(2,2)-2*E3(2,1)*E3(1,2)*E4(2,1)-2*E3(1,2)*E3(3,3)*E4(3,3)+2*E3(3,2)*E3(3,3)*E4(1,3)+2*E3(3,2)*E3(1,3)*E4(3,3)-2*E3(1,2)*E3(2,3)*E4(2,3)+2*E3(1,2)*E3(3,2)*E4(3,2)-2*E3(3,1)*E3(1,2)*E4(3,1)+2*E3(1,1)*E3(3,1)*E4(3,2)+2*E3(1,3)*E3(3,3)*E4(3,2)+2*E3(1,2)*E3(1,3)*E4(1,3)+2*E3(3,1)*E3(3,2)*E4(1,1)+2*E3(1,1)*E3(3,2)*E4(3,1)+2*E3(2,2)*E3(1,3)*E4(2,3);
Mcoefs(156) = -2*E3(2,2)*E3(3,3)*E4(3,3)-2*E3(3,1)*E3(2,2)*E4(3,1)+2*E3(3,2)*E3(2,3)*E4(3,3)+2*E3(3,2)*E3(2,2)*E4(3,2)-2*E3(2,2)*E3(1,3)*E4(1,3)+2*E3(2,3)*E3(3,3)*E4(3,2)+2*E3(1,1)*E3(1,2)*E4(2,1)+2*E3(1,2)*E3(2,3)*E4(1,3)+2*E3(1,2)*E3(2,2)*E4(1,2)+2*E3(1,3)*E3(2,3)*E4(1,2)+2*E3(2,1)*E3(3,2)*E4(3,1)+2*E3(3,2)*E3(3,3)*E4(2,3)+2*E3(2,1)*E3(1,2)*E4(1,1)+2*E3(3,1)*E3(3,2)*E4(2,1)+2*E3(2,2)*E3(2,3)*E4(2,3)+2*E3(2,1)*E3(2,2)*E4(2,1)-2*E3(1,1)*E3(2,2)*E4(1,1)+2*E3(1,1)*E3(2,1)*E4(1,2)+2*E3(1,2)*E3(1,3)*E4(2,3)+2*E3(2,1)*E3(3,1)*E4(3,2)-E3(1,3)^2*E4(2,2)-E3(1,1)^2*E4(2,2)-E3(3,3)^2*E4(2,2)+E3(2,3)^2*E4(2,2)-E3(3,1)^2*E4(2,2)+E3(3,2)^2*E4(2,2)+E3(1,2)^2*E4(2,2)+3*E3(2,2)^2*E4(2,2)+E3(2,1)^2*E4(2,2);
Mcoefs(157) = E3(3,1)^2*E4(3,2)-E3(1,3)^2*E4(3,2)-E3(2,1)^2*E4(3,2)-E3(2,3)^2*E4(3,2)+E3(1,2)^2*E4(3,2)+E3(2,2)^2*E4(3,2)+3*E3(3,2)^2*E4(3,2)-E3(1,1)^2*E4(3,2)+E3(3,3)^2*E4(3,2)-2*E3(3,2)*E3(1,3)*E4(1,3)+2*E3(1,1)*E3(3,1)*E4(1,2)-2*E3(2,1)*E3(3,2)*E4(2,1)-2*E3(3,2)*E3(2,3)*E4(2,3)+2*E3(1,2)*E3(1,3)*E4(3,3)-2*E3(1,1)*E3(3,2)*E4(1,1)+2*E3(1,2)*E3(3,3)*E4(1,3)+2*E3(3,1)*E3(1,2)*E4(1,1)+2*E3(1,1)*E3(1,2)*E4(3,1)+2*E3(1,2)*E3(3,2)*E4(1,2)+2*E3(1,3)*E3(3,3)*E4(1,2)+2*E3(3,1)*E3(2,2)*E4(2,1)+2*E3(2,2)*E3(2,3)*E4(3,3)+2*E3(2,2)*E3(3,3)*E4(2,3)+2*E3(3,2)*E3(2,2)*E4(2,2)+2*E3(2,3)*E3(3,3)*E4(2,2)+2*E3(2,1)*E3(2,2)*E4(3,1)+2*E3(2,1)*E3(3,1)*E4(2,2)+2*E3(3,2)*E3(3,3)*E4(3,3)+2*E3(3,1)*E3(3,2)*E4(3,1);
Mcoefs(158) = 2*E3(1,3)*E3(2,3)*E4(2,3)+2*E3(1,1)*E3(1,3)*E4(1,1)+2*E3(1,2)*E3(2,2)*E4(2,3)+E3(3,3)^2*E4(1,3)+E3(1,1)^2*E4(1,3)+E3(2,3)^2*E4(1,3)+E3(1,2)^2*E4(1,3)-E3(2,1)^2*E4(1,3)-E3(2,2)^2*E4(1,3)-E3(3,1)^2*E4(1,3)-E3(3,2)^2*E4(1,3)+3*E3(1,3)^2*E4(1,3)+2*E3(1,1)*E3(2,1)*E4(2,3)+2*E3(1,1)*E3(3,3)*E4(3,1)+2*E3(3,1)*E3(3,3)*E4(1,1)-2*E3(2,2)*E3(1,3)*E4(2,2)-2*E3(2,1)*E3(1,3)*E4(2,1)-2*E3(3,2)*E3(1,3)*E4(3,2)-2*E3(3,1)*E3(1,3)*E4(3,1)+2*E3(1,2)*E3(3,3)*E4(3,2)+2*E3(2,1)*E3(2,3)*E4(1,1)+2*E3(1,1)*E3(2,3)*E4(2,1)+2*E3(1,2)*E3(3,2)*E4(3,3)+2*E3(1,3)*E3(3,3)*E4(3,3)+2*E3(1,1)*E3(3,1)*E4(3,3)+2*E3(3,2)*E3(3,3)*E4(1,2)+2*E3(2,2)*E3(2,3)*E4(1,2)+2*E3(1,2)*E3(2,3)*E4(2,2)+2*E3(1,2)*E3(1,3)*E4(1,2);
Mcoefs(159) = E3(3,3)^2*E4(2,3)-E3(3,1)^2*E4(2,3)-E3(3,2)^2*E4(2,3)-E3(1,1)^2*E4(2,3)+E3(2,1)^2*E4(2,3)+E3(2,2)^2*E4(2,3)-E3(1,2)^2*E4(2,3)+E3(1,3)^2*E4(2,3)+3*E3(2,3)^2*E4(2,3)-2*E3(1,1)*E3(2,3)*E4(1,1)+2*E3(2,2)*E3(2,3)*E4(2,2)+2*E3(3,2)*E3(3,3)*E4(2,2)+2*E3(2,1)*E3(3,1)*E4(3,3)+2*E3(2,3)*E3(3,3)*E4(3,3)+2*E3(2,1)*E3(2,3)*E4(2,1)+2*E3(2,1)*E3(3,3)*E4(3,1)+2*E3(1,2)*E3(2,2)*E4(1,3)+2*E3(1,2)*E3(1,3)*E4(2,2)+2*E3(1,3)*E3(2,3)*E4(1,3)+2*E3(3,1)*E3(3,3)*E4(2,1)+2*E3(2,2)*E3(3,3)*E4(3,2)+2*E3(1,1)*E3(2,1)*E4(1,3)+2*E3(3,2)*E3(2,2)*E4(3,3)+2*E3(1,1)*E3(1,3)*E4(2,1)+2*E3(2,1)*E3(1,3)*E4(1,1)+2*E3(2,2)*E3(1,3)*E4(1,2)-2*E3(3,2)*E3(2,3)*E4(3,2)-2*E3(3,1)*E3(2,3)*E4(3,1)-2*E3(1,2)*E3(2,3)*E4(1,2);
Mcoefs(160) = 2*E3(3,2)*E3(1,3)*E4(1,2)+2*E3(1,1)*E3(1,3)*E4(3,1)+3*E3(3,3)^2*E4(3,3)+E3(3,2)^2*E4(3,3)+E3(1,3)^2*E4(3,3)+E3(2,3)^2*E4(3,3)-E3(2,2)^2*E4(3,3)-E3(1,1)^2*E4(3,3)-E3(1,2)^2*E4(3,3)-E3(2,1)^2*E4(3,3)+E3(3,1)^2*E4(3,3)-2*E3(1,2)*E3(3,3)*E4(1,2)-2*E3(2,1)*E3(3,3)*E4(2,1)-2*E3(1,1)*E3(3,3)*E4(1,1)+2*E3(2,1)*E3(2,3)*E4(3,1)+2*E3(3,2)*E3(2,2)*E4(2,3)+2*E3(3,1)*E3(2,3)*E4(2,1)+2*E3(1,2)*E3(3,2)*E4(1,3)+2*E3(2,2)*E3(2,3)*E4(3,2)+2*E3(2,1)*E3(3,1)*E4(2,3)+2*E3(3,2)*E3(2,3)*E4(2,2)+2*E3(1,3)*E3(3,3)*E4(1,3)+2*E3(2,3)*E3(3,3)*E4(2,3)+2*E3(1,1)*E3(3,1)*E4(1,3)+2*E3(3,1)*E3(1,3)*E4(1,1)+2*E3(1,2)*E3(1,3)*E4(3,2)-2*E3(2,2)*E3(3,3)*E4(2,2)+2*E3(3,1)*E3(3,3)*E4(3,1)+2*E3(3,2)*E3(3,3)*E4(3,2);
% E1 times quadratic-in-E4 terms (coefficients 161-170).
Mcoefs(161) = E1(3,2)*E4(2,1)*E4(1,3)+E1(2,3)*E4(3,1)*E4(1,2)+E1(2,2)*E4(1,1)*E4(3,3)+E1(1,1)*E4(2,2)*E4(3,3)+E1(3,3)*E4(1,1)*E4(2,2)+E1(2,1)*E4(3,2)*E4(1,3)-E1(3,3)*E4(2,1)*E4(1,2)-E1(1,1)*E4(3,2)*E4(2,3)-E1(2,3)*E4(1,1)*E4(3,2)-E1(3,2)*E4(1,1)*E4(2,3)-E1(2,1)*E4(1,2)*E4(3,3)-E1(1,2)*E4(2,1)*E4(3,3)-E1(3,1)*E4(2,2)*E4(1,3)-E1(2,2)*E4(3,1)*E4(1,3)-E1(1,3)*E4(3,1)*E4(2,2)+E1(1,3)*E4(2,1)*E4(3,2)+E1(3,1)*E4(1,2)*E4(2,3)+E1(1,2)*E4(3,1)*E4(2,3);
Mcoefs(162) = E1(1,1)*E4(1,2)^2+E1(1,1)*E4(2,1)^2+E1(1,1)*E4(3,1)^2-E1(1,1)*E4(3,2)^2-E1(1,1)*E4(3,3)^2-E1(1,1)*E4(2,3)^2-E1(1,1)*E4(2,2)^2+3*E1(1,1)*E4(1,1)^2+E1(1,1)*E4(1,3)^2+2*E1(2,3)*E4(2,1)*E4(1,3)+2*E1(2,2)*E4(2,1)*E4(1,2)+2*E1(1,2)*E4(2,1)*E4(2,2)+2*E1(1,3)*E4(2,1)*E4(2,3)+2*E1(2,1)*E4(1,3)*E4(2,3)-2*E1(2,2)*E4(1,1)*E4(2,2)+2*E1(2,1)*E4(1,2)*E4(2,2)+2*E1(2,1)*E4(1,1)*E4(2,1)+2*E1(3,3)*E4(3,1)*E4(1,3)+2*E1(1,3)*E4(3,1)*E4(3,3)+2*E1(1,3)*E4(1,1)*E4(1,3)+2*E1(1,2)*E4(1,1)*E4(1,2)-2*E1(2,3)*E4(1,1)*E4(2,3)-2*E1(3,3)*E4(1,1)*E4(3,3)-2*E1(3,2)*E4(1,1)*E4(3,2)+2*E1(3,1)*E4(1,3)*E4(3,3)+2*E1(3,1)*E4(1,2)*E4(3,2)+2*E1(3,2)*E4(3,1)*E4(1,2)+2*E1(1,2)*E4(3,1)*E4(3,2)+2*E1(3,1)*E4(1,1)*E4(3,1);
Mcoefs(163) = 3*E1(2,1)*E4(2,1)^2+E1(2,1)*E4(1,1)^2+E1(2,1)*E4(3,1)^2+E1(2,1)*E4(2,3)^2-E1(2,1)*E4(1,2)^2-E1(2,1)*E4(3,2)^2-E1(2,1)*E4(1,3)^2-E1(2,1)*E4(3,3)^2+E1(2,1)*E4(2,2)^2-2*E1(3,2)*E4(2,1)*E4(3,2)+2*E1(2,3)*E4(3,1)*E4(3,3)+2*E1(3,1)*E4(2,1)*E4(3,1)+2*E1(2,2)*E4(3,1)*E4(3,2)+2*E1(3,2)*E4(3,1)*E4(2,2)+2*E1(3,1)*E4(3,2)*E4(2,2)+2*E1(3,1)*E4(2,3)*E4(3,3)-2*E1(1,3)*E4(2,1)*E4(1,3)+2*E1(2,2)*E4(2,1)*E4(2,2)+2*E1(2,3)*E4(2,1)*E4(2,3)+2*E1(3,3)*E4(3,1)*E4(2,3)-2*E1(3,3)*E4(2,1)*E4(3,3)+2*E1(1,1)*E4(1,3)*E4(2,3)+2*E1(1,1)*E4(1,2)*E4(2,2)+2*E1(1,3)*E4(1,1)*E4(2,3)-2*E1(1,2)*E4(2,1)*E4(1,2)+2*E1(2,3)*E4(1,1)*E4(1,3)+2*E1(1,2)*E4(1,1)*E4(2,2)+2*E1(2,2)*E4(1,1)*E4(1,2)+2*E1(1,1)*E4(1,1)*E4(2,1);
Mcoefs(164) = E1(3,1)*E4(2,1)^2+E1(3,1)*E4(3,2)^2-E1(3,1)*E4(1,3)^2-E1(3,1)*E4(2,2)^2-E1(3,1)*E4(2,3)^2+3*E1(3,1)*E4(3,1)^2-E1(3,1)*E4(1,2)^2+E1(3,1)*E4(3,3)^2+2*E1(3,3)*E4(3,1)*E4(3,3)+2*E1(1,1)*E4(1,2)*E4(3,2)+2*E1(3,2)*E4(1,1)*E4(1,2)-2*E1(1,2)*E4(3,1)*E4(1,2)+2*E1(1,2)*E4(1,1)*E4(3,2)+2*E1(3,3)*E4(1,1)*E4(1,3)+2*E1(2,1)*E4(2,3)*E4(3,3)+2*E1(3,3)*E4(2,1)*E4(2,3)+2*E1(3,2)*E4(2,1)*E4(2,2)+2*E1(2,1)*E4(2,1)*E4(3,1)+2*E1(2,3)*E4(2,1)*E4(3,3)+2*E1(2,2)*E4(2,1)*E4(3,2)-2*E1(2,2)*E4(3,1)*E4(2,2)-2*E1(2,3)*E4(3,1)*E4(2,3)-2*E1(1,3)*E4(3,1)*E4(1,3)+2*E1(1,1)*E4(1,3)*E4(3,3)+2*E1(2,1)*E4(3,2)*E4(2,2)+2*E1(3,2)*E4(3,1)*E4(3,2)+2*E1(1,3)*E4(1,1)*E4(3,3)+2*E1(1,1)*E4(1,1)*E4(3,1)+E1(3,1)*E4(1,1)^2;
Mcoefs(165) = E1(1,2)*E4(1,1)^2-E1(1,2)*E4(3,3)^2-E1(1,2)*E4(3,1)^2-E1(1,2)*E4(2,3)^2-E1(1,2)*E4(2,1)^2+3*E1(1,2)*E4(1,2)^2+E1(1,2)*E4(3,2)^2+E1(1,2)*E4(2,2)^2+E1(1,2)*E4(1,3)^2+2*E1(3,2)*E4(1,2)*E4(3,2)-2*E1(3,1)*E4(3,1)*E4(1,2)-2*E1(2,3)*E4(1,2)*E4(2,3)-2*E1(3,3)*E4(1,2)*E4(3,3)+2*E1(2,2)*E4(1,1)*E4(2,1)-2*E1(2,1)*E4(2,1)*E4(1,2)+2*E1(2,2)*E4(1,3)*E4(2,3)+2*E1(1,1)*E4(1,1)*E4(1,2)+2*E1(1,3)*E4(2,2)*E4(2,3)+2*E1(2,2)*E4(1,2)*E4(2,2)+2*E1(2,1)*E4(1,1)*E4(2,2)+2*E1(1,3)*E4(1,2)*E4(1,3)+2*E1(1,1)*E4(2,1)*E4(2,2)+2*E1(2,3)*E4(2,2)*E4(1,3)+2*E1(3,3)*E4(3,2)*E4(1,3)+2*E1(1,3)*E4(3,2)*E4(3,3)+2*E1(3,2)*E4(1,1)*E4(3,1)+2*E1(3,2)*E4(1,3)*E4(3,3)+2*E1(3,1)*E4(1,1)*E4(3,2)+2*E1(1,1)*E4(3,1)*E4(3,2);
Mcoefs(166) = 3*E1(2,2)*E4(2,2)^2+E1(2,2)*E4(2,3)^2+E1(2,2)*E4(2,1)^2+2*E1(1,3)*E4(1,2)*E4(2,3)+2*E1(3,2)*E4(2,3)*E4(3,3)+2*E1(2,1)*E4(3,1)*E4(3,2)+2*E1(3,1)*E4(2,1)*E4(3,2)+2*E1(3,3)*E4(3,2)*E4(2,3)-2*E1(1,1)*E4(1,1)*E4(2,2)-2*E1(3,3)*E4(2,2)*E4(3,3)-2*E1(1,3)*E4(2,2)*E4(1,3)-2*E1(3,1)*E4(3,1)*E4(2,2)+2*E1(2,3)*E4(3,2)*E4(3,3)+2*E1(3,2)*E4(3,2)*E4(2,2)+2*E1(1,2)*E4(1,1)*E4(2,1)+2*E1(3,2)*E4(2,1)*E4(3,1)+2*E1(1,1)*E4(2,1)*E4(1,2)+2*E1(2,3)*E4(1,2)*E4(1,3)+2*E1(2,1)*E4(1,1)*E4(1,2)+2*E1(1,2)*E4(1,2)*E4(2,2)+2*E1(1,2)*E4(1,3)*E4(2,3)+2*E1(2,3)*E4(2,2)*E4(2,3)-E1(2,2)*E4(3,3)^2-E1(2,2)*E4(1,3)^2-E1(2,2)*E4(1,1)^2+2*E1(2,1)*E4(2,1)*E4(2,2)+E1(2,2)*E4(3,2)^2+E1(2,2)*E4(1,2)^2-E1(2,2)*E4(3,1)^2;
Mcoefs(167) = E1(3,2)*E4(3,1)^2-E1(3,2)*E4(1,1)^2-E1(3,2)*E4(2,3)^2+3*E1(3,2)*E4(3,2)^2+E1(3,2)*E4(1,2)^2+E1(3,2)*E4(2,2)^2-E1(3,2)*E4(2,1)^2-E1(3,2)*E4(1,3)^2+E1(3,2)*E4(3,3)^2-2*E1(2,1)*E4(2,1)*E4(3,2)+2*E1(1,2)*E4(1,1)*E4(3,1)+2*E1(1,2)*E4(1,3)*E4(3,3)+2*E1(3,3)*E4(1,2)*E4(1,3)+2*E1(3,3)*E4(3,2)*E4(3,3)-2*E1(1,1)*E4(1,1)*E4(3,2)-2*E1(1,3)*E4(3,2)*E4(1,3)-2*E1(2,3)*E4(3,2)*E4(2,3)+2*E1(2,3)*E4(2,2)*E4(3,3)+2*E1(2,2)*E4(3,2)*E4(2,2)+2*E1(3,3)*E4(2,2)*E4(2,3)+2*E1(3,1)*E4(3,1)*E4(3,2)+2*E1(2,2)*E4(2,1)*E4(3,1)+2*E1(2,1)*E4(3,1)*E4(2,2)+2*E1(3,1)*E4(2,1)*E4(2,2)+2*E1(2,2)*E4(2,3)*E4(3,3)+2*E1(1,2)*E4(1,2)*E4(3,2)+2*E1(1,1)*E4(3,1)*E4(1,2)+2*E1(3,1)*E4(1,1)*E4(1,2)+2*E1(1,3)*E4(1,2)*E4(3,3);
Mcoefs(168) = E1(1,3)*E4(3,3)^2+E1(1,3)*E4(2,3)^2-E1(1,3)*E4(2,2)^2-E1(1,3)*E4(2,1)^2-E1(1,3)*E4(3,2)^2-E1(1,3)*E4(3,1)^2+3*E1(1,3)*E4(1,3)^2+E1(1,3)*E4(1,1)^2+E1(1,3)*E4(1,2)^2+2*E1(2,3)*E4(1,1)*E4(2,1)+2*E1(2,3)*E4(1,2)*E4(2,2)-2*E1(2,1)*E4(2,1)*E4(1,3)+2*E1(2,3)*E4(1,3)*E4(2,3)+2*E1(2,2)*E4(1,2)*E4(2,3)-2*E1(3,2)*E4(3,2)*E4(1,3)+2*E1(1,2)*E4(1,2)*E4(1,3)+2*E1(1,2)*E4(2,2)*E4(2,3)+2*E1(3,3)*E4(1,2)*E4(3,2)+2*E1(3,2)*E4(1,2)*E4(3,3)-2*E1(3,1)*E4(3,1)*E4(1,3)+2*E1(1,1)*E4(1,1)*E4(1,3)+2*E1(3,3)*E4(1,1)*E4(3,1)+2*E1(2,1)*E4(1,1)*E4(2,3)+2*E1(1,1)*E4(2,1)*E4(2,3)-2*E1(2,2)*E4(2,2)*E4(1,3)+2*E1(1,2)*E4(3,2)*E4(3,3)+2*E1(3,3)*E4(1,3)*E4(3,3)+2*E1(1,1)*E4(3,1)*E4(3,3)+2*E1(3,1)*E4(1,1)*E4(3,3);
Mcoefs(169) = 3*E1(2,3)*E4(2,3)^2+2*E1(2,1)*E4(2,1)*E4(2,3)+2*E1(2,2)*E4(2,2)*E4(2,3)+2*E1(2,1)*E4(1,1)*E4(1,3)-2*E1(1,1)*E4(1,1)*E4(2,3)+2*E1(2,2)*E4(1,2)*E4(1,3)+2*E1(1,2)*E4(2,2)*E4(1,3)-2*E1(1,2)*E4(1,2)*E4(2,3)+2*E1(3,1)*E4(2,1)*E4(3,3)+2*E1(2,1)*E4(3,1)*E4(3,3)+2*E1(3,3)*E4(2,3)*E4(3,3)+2*E1(3,2)*E4(2,2)*E4(3,3)-2*E1(3,2)*E4(3,2)*E4(2,3)-2*E1(3,1)*E4(3,1)*E4(2,3)+E1(2,3)*E4(1,3)^2+E1(2,3)*E4(2,2)^2+E1(2,3)*E4(2,1)^2-E1(2,3)*E4(1,1)^2-E1(2,3)*E4(3,2)^2-E1(2,3)*E4(1,2)^2-E1(2,3)*E4(3,1)^2+E1(2,3)*E4(3,3)^2+2*E1(3,3)*E4(2,1)*E4(3,1)+2*E1(1,1)*E4(2,1)*E4(1,3)+2*E1(2,2)*E4(3,2)*E4(3,3)+2*E1(3,3)*E4(3,2)*E4(2,2)+2*E1(1,3)*E4(1,3)*E4(2,3)+2*E1(1,3)*E4(1,2)*E4(2,2)+2*E1(1,3)*E4(1,1)*E4(2,1);
Mcoefs(170) = E1(3,3)*E4(3,1)^2+E1(3,3)*E4(3,2)^2+E1(3,3)*E4(2,3)^2+3*E1(3,3)*E4(3,3)^2+E1(3,3)*E4(1,3)^2-E1(3,3)*E4(2,2)^2-E1(3,3)*E4(2,1)^2-E1(3,3)*E4(1,2)^2+2*E1(3,2)*E4(3,2)*E4(3,3)+2*E1(2,3)*E4(2,3)*E4(3,3)+2*E1(2,2)*E4(3,2)*E4(2,3)+2*E1(1,3)*E4(1,1)*E4(3,1)+2*E1(3,2)*E4(2,2)*E4(2,3)+2*E1(3,1)*E4(3,1)*E4(3,3)-E1(3,3)*E4(1,1)^2-2*E1(2,2)*E4(2,2)*E4(3,3)+2*E1(3,1)*E4(1,1)*E4(1,3)+2*E1(1,2)*E4(3,2)*E4(1,3)-2*E1(1,1)*E4(1,1)*E4(3,3)-2*E1(2,1)*E4(2,1)*E4(3,3)+2*E1(3,2)*E4(1,2)*E4(1,3)+2*E1(2,3)*E4(2,1)*E4(3,1)-2*E1(1,2)*E4(1,2)*E4(3,3)+2*E1(2,3)*E4(3,2)*E4(2,2)+2*E1(3,1)*E4(2,1)*E4(2,3)+2*E1(2,1)*E4(3,1)*E4(2,3)+2*E1(1,1)*E4(3,1)*E4(1,3)+2*E1(1,3)*E4(1,3)*E4(3,3)+2*E1(1,3)*E4(1,2)*E4(3,2);
% E2 times quadratic-in-E4 terms (coefficients 171-180).
Mcoefs(171) = -E2(2,1)*E4(1,2)*E4(3,3)-E2(1,2)*E4(2,1)*E4(3,3)-E2(3,1)*E4(2,2)*E4(1,3)-E2(1,3)*E4(3,1)*E4(2,2)-E2(2,2)*E4(3,1)*E4(1,3)+E2(2,3)*E4(3,1)*E4(1,2)+E2(1,2)*E4(3,1)*E4(2,3)+E2(2,1)*E4(3,2)*E4(1,3)+E2(2,2)*E4(1,1)*E4(3,3)+E2(1,3)*E4(2,1)*E4(3,2)+E2(3,3)*E4(1,1)*E4(2,2)+E2(3,2)*E4(2,1)*E4(1,3)+E2(3,1)*E4(1,2)*E4(2,3)+E2(1,1)*E4(2,2)*E4(3,3)-E2(3,3)*E4(2,1)*E4(1,2)-E2(1,1)*E4(3,2)*E4(2,3)-E2(2,3)*E4(1,1)*E4(3,2)-E2(3,2)*E4(1,1)*E4(2,3);
Mcoefs(172) = -E2(1,1)*E4(3,3)^2-E2(1,1)*E4(3,2)^2-E2(1,1)*E4(2,3)^2+E2(1,1)*E4(1,2)^2+E2(1,1)*E4(3,1)^2+E2(1,1)*E4(2,1)^2-E2(1,1)*E4(2,2)^2+3*E2(1,1)*E4(1,1)^2+E2(1,1)*E4(1,3)^2+2*E2(1,3)*E4(1,1)*E4(1,3)+2*E2(1,2)*E4(1,1)*E4(1,2)-2*E2(3,3)*E4(1,1)*E4(3,3)-2*E2(3,2)*E4(1,1)*E4(3,2)+2*E2(3,3)*E4(3,1)*E4(1,3)-2*E2(2,2)*E4(1,1)*E4(2,2)+2*E2(1,2)*E4(3,1)*E4(3,2)+2*E2(1,2)*E4(2,1)*E4(2,2)+2*E2(2,1)*E4(1,3)*E4(2,3)+2*E2(2,1)*E4(1,2)*E4(2,2)+2*E2(2,1)*E4(1,1)*E4(2,1)+2*E2(1,3)*E4(2,1)*E4(2,3)+2*E2(2,2)*E4(2,1)*E4(1,2)-2*E2(2,3)*E4(1,1)*E4(2,3)+2*E2(2,3)*E4(2,1)*E4(1,3)+2*E2(3,1)*E4(1,2)*E4(3,2)+2*E2(3,1)*E4(1,3)*E4(3,3)+2*E2(3,1)*E4(1,1)*E4(3,1)+2*E2(1,3)*E4(3,1)*E4(3,3)+2*E2(3,2)*E4(3,1)*E4(1,2);
Mcoefs(173) = E2(2,1)*E4(1,1)^2+E2(2,1)*E4(2,3)^2+3*E2(2,1)*E4(2,1)^2-E2(2,1)*E4(3,2)^2-E2(2,1)*E4(1,2)^2-E2(2,1)*E4(3,3)^2-E2(2,1)*E4(1,3)^2+E2(2,1)*E4(3,1)^2+E2(2,1)*E4(2,2)^2+2*E2(3,1)*E4(3,2)*E4(2,2)+2*E2(3,1)*E4(2,3)*E4(3,3)+2*E2(1,1)*E4(1,3)*E4(2,3)+2*E2(1,1)*E4(1,2)*E4(2,2)+2*E2(2,3)*E4(1,1)*E4(1,3)+2*E2(1,3)*E4(1,1)*E4(2,3)-2*E2(1,2)*E4(2,1)*E4(1,2)+2*E2(2,2)*E4(1,1)*E4(1,2)+2*E2(1,2)*E4(1,1)*E4(2,2)+2*E2(1,1)*E4(1,1)*E4(2,1)-2*E2(3,2)*E4(2,1)*E4(3,2)-2*E2(1,3)*E4(2,1)*E4(1,3)+2*E2(2,2)*E4(2,1)*E4(2,2)+2*E2(2,3)*E4(2,1)*E4(2,3)+2*E2(3,3)*E4(3,1)*E4(2,3)-2*E2(3,3)*E4(2,1)*E4(3,3)+2*E2(2,3)*E4(3,1)*E4(3,3)+2*E2(3,1)*E4(2,1)*E4(3,1)+2*E2(2,2)*E4(3,1)*E4(3,2)+2*E2(3,2)*E4(3,1)*E4(2,2);
Mcoefs(174) = E2(3,1)*E4(1,1)^2-E2(3,1)*E4(1,3)^2-E2(3,1)*E4(2,2)^2-E2(3,1)*E4(2,3)^2-E2(3,1)*E4(1,2)^2+3*E2(3,1)*E4(3,1)^2+E2(3,1)*E4(3,2)^2+E2(3,1)*E4(3,3)^2+E2(3,1)*E4(2,1)^2+2*E2(3,2)*E4(1,1)*E4(1,2)-2*E2(2,3)*E4(3,1)*E4(2,3)+2*E2(2,1)*E4(2,3)*E4(3,3)+2*E2(3,2)*E4(2,1)*E4(2,2)+2*E2(2,1)*E4(2,1)*E4(3,1)+2*E2(2,2)*E4(2,1)*E4(3,2)+2*E2(3,3)*E4(2,1)*E4(2,3)+2*E2(2,3)*E4(2,1)*E4(3,3)-2*E2(2,2)*E4(3,1)*E4(2,2)+2*E2(1,1)*E4(1,3)*E4(3,3)+2*E2(1,3)*E4(1,1)*E4(3,3)+2*E2(2,1)*E4(3,2)*E4(2,2)+2*E2(3,2)*E4(3,1)*E4(3,2)+2*E2(3,3)*E4(1,1)*E4(1,3)+2*E2(1,1)*E4(1,1)*E4(3,1)+2*E2(1,2)*E4(1,1)*E4(3,2)-2*E2(1,3)*E4(3,1)*E4(1,3)-2*E2(1,2)*E4(3,1)*E4(1,2)+2*E2(3,3)*E4(3,1)*E4(3,3)+2*E2(1,1)*E4(1,2)*E4(3,2);
Mcoefs(175) = E2(1,2)*E4(1,1)^2-E2(1,2)*E4(3,3)^2-E2(1,2)*E4(3,1)^2-E2(1,2)*E4(2,3)^2-E2(1,2)*E4(2,1)^2+E2(1,2)*E4(2,2)^2+3*E2(1,2)*E4(1,2)^2+E2(1,2)*E4(3,2)^2+E2(1,2)*E4(1,3)^2+2*E2(3,2)*E4(1,2)*E4(3,2)+2*E2(3,2)*E4(1,1)*E4(3,1)+2*E2(3,2)*E4(1,3)*E4(3,3)+2*E2(3,1)*E4(1,1)*E4(3,2)-2*E2(3,1)*E4(3,1)*E4(1,2)+2*E2(1,1)*E4(3,1)*E4(3,2)-2*E2(2,3)*E4(1,2)*E4(2,3)-2*E2(2,1)*E4(2,1)*E4(1,2)-2*E2(3,3)*E4(1,2)*E4(3,3)+2*E2(2,2)*E4(1,1)*E4(2,1)+2*E2(2,2)*E4(1,3)*E4(2,3)+2*E2(1,3)*E4(1,2)*E4(1,3)+2*E2(1,3)*E4(2,2)*E4(2,3)+2*E2(2,2)*E4(1,2)*E4(2,2)+2*E2(2,1)*E4(1,1)*E4(2,2)+2*E2(1,1)*E4(1,1)*E4(1,2)+2*E2(1,1)*E4(2,1)*E4(2,2)+2*E2(2,3)*E4(2,2)*E4(1,3)+2*E2(3,3)*E4(3,2)*E4(1,3)+2*E2(1,3)*E4(3,2)*E4(3,3);
Mcoefs(176) = 3*E2(2,2)*E4(2,2)^2-E2(2,2)*E4(3,3)^2-E2(2,2)*E4(3,1)^2-E2(2,2)*E4(1,3)^2-E2(2,2)*E4(1,1)^2+E2(2,2)*E4(2,1)^2+E2(2,2)*E4(2,3)^2+E2(2,2)*E4(3,2)^2+E2(2,2)*E4(1,2)^2-2*E2(1,1)*E4(1,1)*E4(2,2)+2*E2(3,2)*E4(2,3)*E4(3,3)+2*E2(1,2)*E4(1,2)*E4(2,2)+2*E2(1,2)*E4(1,3)*E4(2,3)+2*E2(2,3)*E4(2,2)*E4(2,3)+2*E2(3,2)*E4(2,1)*E4(3,1)+2*E2(2,1)*E4(2,1)*E4(2,2)+2*E2(3,1)*E4(2,1)*E4(3,2)+2*E2(2,1)*E4(3,1)*E4(3,2)+2*E2(3,3)*E4(3,2)*E4(2,3)-2*E2(3,3)*E4(2,2)*E4(3,3)-2*E2(3,1)*E4(3,1)*E4(2,2)-2*E2(1,3)*E4(2,2)*E4(1,3)+2*E2(2,3)*E4(3,2)*E4(3,3)+2*E2(3,2)*E4(3,2)*E4(2,2)+2*E2(1,2)*E4(1,1)*E4(2,1)+2*E2(1,3)*E4(1,2)*E4(2,3)+2*E2(2,1)*E4(1,1)*E4(1,2)+2*E2(1,1)*E4(2,1)*E4(1,2)+2*E2(2,3)*E4(1,2)*E4(1,3);
Mcoefs(177) = 2*E2(2,2)*E4(2,1)*E4(3,1)+2*E2(2,2)*E4(2,3)*E4(3,3)-2*E2(2,3)*E4(3,2)*E4(2,3)+2*E2(3,3)*E4(2,2)*E4(2,3)-2*E2(1,1)*E4(1,1)*E4(3,2)+2*E2(2,3)*E4(2,2)*E4(3,3)+2*E2(1,3)*E4(1,2)*E4(3,3)-2*E2(1,3)*E4(3,2)*E4(1,3)+2*E2(3,3)*E4(1,2)*E4(1,3)+2*E2(2,2)*E4(3,2)*E4(2,2)+2*E2(3,1)*E4(1,1)*E4(1,2)+2*E2(3,3)*E4(3,2)*E4(3,3)+2*E2(2,1)*E4(3,1)*E4(2,2)+2*E2(3,1)*E4(2,1)*E4(2,2)-E2(3,2)*E4(2,1)^2-E2(3,2)*E4(1,3)^2+E2(3,2)*E4(2,2)^2-E2(3,2)*E4(2,3)^2+E2(3,2)*E4(1,2)^2-E2(3,2)*E4(1,1)^2+E2(3,2)*E4(3,1)^2+E2(3,2)*E4(3,3)^2+3*E2(3,2)*E4(3,2)^2+2*E2(3,1)*E4(3,1)*E4(3,2)+2*E2(1,2)*E4(1,3)*E4(3,3)+2*E2(1,2)*E4(1,1)*E4(3,1)-2*E2(2,1)*E4(2,1)*E4(3,2)+2*E2(1,1)*E4(3,1)*E4(1,2)+2*E2(1,2)*E4(1,2)*E4(3,2);
Mcoefs(178) = 2*E2(1,2)*E4(1,2)*E4(1,3)+2*E2(2,3)*E4(1,2)*E4(2,2)+2*E2(2,3)*E4(1,3)*E4(2,3)-2*E2(2,2)*E4(2,2)*E4(1,3)+2*E2(1,2)*E4(3,2)*E4(3,3)+2*E2(1,1)*E4(1,1)*E4(1,3)+2*E2(3,1)*E4(1,1)*E4(3,3)+2*E2(1,1)*E4(3,1)*E4(3,3)+2*E2(1,2)*E4(2,2)*E4(2,3)+2*E2(2,2)*E4(1,2)*E4(2,3)-2*E2(2,1)*E4(2,1)*E4(1,3)-2*E2(3,2)*E4(3,2)*E4(1,3)+2*E2(1,1)*E4(2,1)*E4(2,3)+2*E2(2,1)*E4(1,1)*E4(2,3)-2*E2(3,1)*E4(3,1)*E4(1,3)+2*E2(3,3)*E4(1,1)*E4(3,1)+2*E2(3,2)*E4(1,2)*E4(3,3)+2*E2(2,3)*E4(1,1)*E4(2,1)+2*E2(3,3)*E4(1,3)*E4(3,3)+2*E2(3,3)*E4(1,2)*E4(3,2)+E2(1,3)*E4(2,3)^2+E2(1,3)*E4(3,3)^2+E2(1,3)*E4(1,1)^2+E2(1,3)*E4(1,2)^2-E2(1,3)*E4(2,2)^2+3*E2(1,3)*E4(1,3)^2-E2(1,3)*E4(2,1)^2-E2(1,3)*E4(3,2)^2-E2(1,3)*E4(3,1)^2;
Mcoefs(179) = 2*E2(2,1)*E4(1,1)*E4(1,3)+2*E2(1,3)*E4(1,1)*E4(2,1)-2*E2(3,2)*E4(3,2)*E4(2,3)+2*E2(1,3)*E4(1,3)*E4(2,3)-E2(2,3)*E4(1,1)^2-E2(2,3)*E4(1,2)^2-E2(2,3)*E4(3,2)^2-E2(2,3)*E4(3,1)^2+E2(2,3)*E4(3,3)^2+E2(2,3)*E4(1,3)^2+E2(2,3)*E4(2,1)^2+E2(2,3)*E4(2,2)^2+3*E2(2,3)*E4(2,3)^2+2*E2(2,1)*E4(2,1)*E4(2,3)+2*E2(2,2)*E4(1,2)*E4(1,3)+2*E2(1,3)*E4(1,2)*E4(2,2)-2*E2(1,1)*E4(1,1)*E4(2,3)-2*E2(1,2)*E4(1,2)*E4(2,3)-2*E2(3,1)*E4(3,1)*E4(2,3)+2*E2(2,2)*E4(3,2)*E4(3,3)+2*E2(1,2)*E4(2,2)*E4(1,3)+2*E2(2,2)*E4(2,2)*E4(2,3)+2*E2(3,2)*E4(2,2)*E4(3,3)+2*E2(1,1)*E4(2,1)*E4(1,3)+2*E2(3,3)*E4(3,2)*E4(2,2)+2*E2(3,3)*E4(2,1)*E4(3,1)+2*E2(2,1)*E4(3,1)*E4(3,3)+2*E2(3,1)*E4(2,1)*E4(3,3)+2*E2(3,3)*E4(2,3)*E4(3,3);
Mcoefs(180) = -E2(3,3)*E4(2,1)^2+E2(3,3)*E4(3,1)^2+3*E2(3,3)*E4(3,3)^2+E2(3,3)*E4(3,2)^2+2*E2(3,1)*E4(3,1)*E4(3,3)+2*E2(3,2)*E4(3,2)*E4(3,3)+2*E2(2,2)*E4(3,2)*E4(2,3)+2*E2(1,3)*E4(1,3)*E4(3,3)-2*E2(1,1)*E4(1,1)*E4(3,3)+2*E2(3,2)*E4(2,2)*E4(2,3)+2*E2(3,1)*E4(2,1)*E4(2,3)+2*E2(1,3)*E4(1,1)*E4(3,1)+2*E2(1,1)*E4(3,1)*E4(1,3)+2*E2(2,3)*E4(2,1)*E4(3,1)+2*E2(2,3)*E4(2,3)*E4(3,3)+2*E2(1,2)*E4(3,2)*E4(1,3)+2*E2(3,2)*E4(1,2)*E4(1,3)+2*E2(3,1)*E4(1,1)*E4(1,3)+2*E2(1,3)*E4(1,2)*E4(3,2)+2*E2(2,1)*E4(3,1)*E4(2,3)-2*E2(2,1)*E4(2,1)*E4(3,3)-2*E2(2,2)*E4(2,2)*E4(3,3)-2*E2(1,2)*E4(1,2)*E4(3,3)+2*E2(2,3)*E4(3,2)*E4(2,2)-E2(3,3)*E4(1,2)^2-E2(3,3)*E4(2,2)^2-E2(3,3)*E4(1,1)^2+E2(3,3)*E4(1,3)^2+E2(3,3)*E4(2,3)^2;
% E3 times quadratic-in-E4 terms (coefficients 181+; this group continues
% beyond the end of this chunk).
Mcoefs(181) = E3(2,2)*E4(1,1)*E4(3,3)-E3(2,2)*E4(3,1)*E4(1,3)+E3(3,2)*E4(2,1)*E4(1,3)-E3(3,2)*E4(1,1)*E4(2,3)-E3(1,2)*E4(2,1)*E4(3,3)+E3(2,1)*E4(3,2)*E4(1,3)-E3(1,3)*E4(3,1)*E4(2,2)+E3(1,2)*E4(3,1)*E4(2,3)+E3(1,1)*E4(2,2)*E4(3,3)-E3(1,1)*E4(3,2)*E4(2,3)+E3(2,3)*E4(3,1)*E4(1,2)+E3(1,3)*E4(2,1)*E4(3,2)-E3(2,3)*E4(1,1)*E4(3,2)-E3(2,1)*E4(1,2)*E4(3,3)-E3(3,1)*E4(2,2)*E4(1,3)+E3(3,3)*E4(1,1)*E4(2,2)-E3(3,3)*E4(2,1)*E4(1,2)+E3(3,1)*E4(1,2)*E4(2,3);
Mcoefs(182) = E3(1,1)*E4(1,3)^2+2*E3(2,3)*E4(2,1)*E4(1,3)+2*E3(1,3)*E4(2,1)*E4(2,3)+2*E3(2,2)*E4(2,1)*E4(1,2)+2*E3(1,2)*E4(2,1)*E4(2,2)+2*E3(2,1)*E4(1,3)*E4(2,3)+2*E3(2,1)*E4(1,2)*E4(2,2)+2*E3(2,1)*E4(1,1)*E4(2,1)+E3(1,1)*E4(3,1)^2+2*E3(1,3)*E4(1,1)*E4(1,3)+2*E3(1,2)*E4(1,1)*E4(1,2)-2*E3(3,3)*E4(1,1)*E4(3,3)-2*E3(3,2)*E4(1,1)*E4(3,2)+E3(1,1)*E4(2,1)^2-E3(1,1)*E4(2,3)^2+2*E3(3,3)*E4(3,1)*E4(1,3)+2*E3(1,3)*E4(3,1)*E4(3,3)+2*E3(3,2)*E4(3,1)*E4(1,2)+2*E3(1,2)*E4(3,1)*E4(3,2)+2*E3(3,1)*E4(1,1)*E4(3,1)+2*E3(3,1)*E4(1,3)*E4(3,3)+2*E3(3,1)*E4(1,2)*E4(3,2)+E3(1,1)*E4(1,2)^2-2*E3(2,3)*E4(1,1)*E4(2,3)-E3(1,1)*E4(2,2)^2+3*E3(1,1)*E4(1,1)^2-E3(1,1)*E4(3,3)^2-E3(1,1)*E4(3,2)^2-2*E3(2,2)*E4(1,1)*E4(2,2);
Mcoefs(165) = E1(1,2)*E4(1,1)^2-E1(1,2)*E4(3,3)^2-E1(1,2)*E4(3,1)^2-E1(1,2)*E4(2,3)^2-E1(1,2)*E4(2,1)^2+3*E1(1,2)*E4(1,2)^2+E1(1,2)*E4(3,2)^2+E1(1,2)*E4(2,2)^2+E1(1,2)*E4(1,3)^2+2*E1(3,2)*E4(1,2)*E4(3,2)-2*E1(3,1)*E4(3,1)*E4(1,2)-2*E1(2,3)*E4(1,2)*E4(2,3)-2*E1(3,3)*E4(1,2)*E4(3,3)+2*E1(2,2)*E4(1,1)*E4(2,1)-2*E1(2,1)*E4(2,1)*E4(1,2)+2*E1(2,2)*E4(1,3)*E4(2,3)+2*E1(1,1)*E4(1,1)*E4(1,2)+2*E1(1,3)*E4(2,2)*E4(2,3)+2*E1(2,2)*E4(1,2)*E4(2,2)+2*E1(2,1)*E4(1,1)*E4(2,2)+2*E1(1,3)*E4(1,2)*E4(1,3)+2*E1(1,1)*E4(2,1)*E4(2,2)+2*E1(2,3)*E4(2,2)*E4(1,3)+2*E1(3,3)*E4(3,2)*E4(1,3)+2*E1(1,3)*E4(3,2)*E4(3,3)+2*E1(3,2)*E4(1,1)*E4(3,1)+2*E1(3,2)*E4(1,3)*E4(3,3)+2*E1(3,1)*E4(1,1)*E4(3,2)+2*E1(1,1)*E4(3,1)*E4(3,2);
Mcoefs(166) = 3*E1(2,2)*E4(2,2)^2+E1(2,2)*E4(2,3)^2+E1(2,2)*E4(2,1)^2+2*E1(1,3)*E4(1,2)*E4(2,3)+2*E1(3,2)*E4(2,3)*E4(3,3)+2*E1(2,1)*E4(3,1)*E4(3,2)+2*E1(3,1)*E4(2,1)*E4(3,2)+2*E1(3,3)*E4(3,2)*E4(2,3)-2*E1(1,1)*E4(1,1)*E4(2,2)-2*E1(3,3)*E4(2,2)*E4(3,3)-2*E1(1,3)*E4(2,2)*E4(1,3)-2*E1(3,1)*E4(3,1)*E4(2,2)+2*E1(2,3)*E4(3,2)*E4(3,3)+2*E1(3,2)*E4(3,2)*E4(2,2)+2*E1(1,2)*E4(1,1)*E4(2,1)+2*E1(3,2)*E4(2,1)*E4(3,1)+2*E1(1,1)*E4(2,1)*E4(1,2)+2*E1(2,3)*E4(1,2)*E4(1,3)+2*E1(2,1)*E4(1,1)*E4(1,2)+2*E1(1,2)*E4(1,2)*E4(2,2)+2*E1(1,2)*E4(1,3)*E4(2,3)+2*E1(2,3)*E4(2,2)*E4(2,3)-E1(2,2)*E4(3,3)^2-E1(2,2)*E4(1,3)^2-E1(2,2)*E4(1,1)^2+2*E1(2,1)*E4(2,1)*E4(2,2)+E1(2,2)*E4(3,2)^2+E1(2,2)*E4(1,2)^2-E1(2,2)*E4(3,1)^2;
Mcoefs(167) = E1(3,2)*E4(3,1)^2-E1(3,2)*E4(1,1)^2-E1(3,2)*E4(2,3)^2+3*E1(3,2)*E4(3,2)^2+E1(3,2)*E4(1,2)^2+E1(3,2)*E4(2,2)^2-E1(3,2)*E4(2,1)^2-E1(3,2)*E4(1,3)^2+E1(3,2)*E4(3,3)^2-2*E1(2,1)*E4(2,1)*E4(3,2)+2*E1(1,2)*E4(1,1)*E4(3,1)+2*E1(1,2)*E4(1,3)*E4(3,3)+2*E1(3,3)*E4(1,2)*E4(1,3)+2*E1(3,3)*E4(3,2)*E4(3,3)-2*E1(1,1)*E4(1,1)*E4(3,2)-2*E1(1,3)*E4(3,2)*E4(1,3)-2*E1(2,3)*E4(3,2)*E4(2,3)+2*E1(2,3)*E4(2,2)*E4(3,3)+2*E1(2,2)*E4(3,2)*E4(2,2)+2*E1(3,3)*E4(2,2)*E4(2,3)+2*E1(3,1)*E4(3,1)*E4(3,2)+2*E1(2,2)*E4(2,1)*E4(3,1)+2*E1(2,1)*E4(3,1)*E4(2,2)+2*E1(3,1)*E4(2,1)*E4(2,2)+2*E1(2,2)*E4(2,3)*E4(3,3)+2*E1(1,2)*E4(1,2)*E4(3,2)+2*E1(1,1)*E4(3,1)*E4(1,2)+2*E1(3,1)*E4(1,1)*E4(1,2)+2*E1(1,3)*E4(1,2)*E4(3,3);
Mcoefs(168) = E1(1,3)*E4(3,3)^2+E1(1,3)*E4(2,3)^2-E1(1,3)*E4(2,2)^2-E1(1,3)*E4(2,1)^2-E1(1,3)*E4(3,2)^2-E1(1,3)*E4(3,1)^2+3*E1(1,3)*E4(1,3)^2+E1(1,3)*E4(1,1)^2+E1(1,3)*E4(1,2)^2+2*E1(2,3)*E4(1,1)*E4(2,1)+2*E1(2,3)*E4(1,2)*E4(2,2)-2*E1(2,1)*E4(2,1)*E4(1,3)+2*E1(2,3)*E4(1,3)*E4(2,3)+2*E1(2,2)*E4(1,2)*E4(2,3)-2*E1(3,2)*E4(3,2)*E4(1,3)+2*E1(1,2)*E4(1,2)*E4(1,3)+2*E1(1,2)*E4(2,2)*E4(2,3)+2*E1(3,3)*E4(1,2)*E4(3,2)+2*E1(3,2)*E4(1,2)*E4(3,3)-2*E1(3,1)*E4(3,1)*E4(1,3)+2*E1(1,1)*E4(1,1)*E4(1,3)+2*E1(3,3)*E4(1,1)*E4(3,1)+2*E1(2,1)*E4(1,1)*E4(2,3)+2*E1(1,1)*E4(2,1)*E4(2,3)-2*E1(2,2)*E4(2,2)*E4(1,3)+2*E1(1,2)*E4(3,2)*E4(3,3)+2*E1(3,3)*E4(1,3)*E4(3,3)+2*E1(1,1)*E4(3,1)*E4(3,3)+2*E1(3,1)*E4(1,1)*E4(3,3);
Mcoefs(169) = 3*E1(2,3)*E4(2,3)^2+2*E1(2,1)*E4(2,1)*E4(2,3)+2*E1(2,2)*E4(2,2)*E4(2,3)+2*E1(2,1)*E4(1,1)*E4(1,3)-2*E1(1,1)*E4(1,1)*E4(2,3)+2*E1(2,2)*E4(1,2)*E4(1,3)+2*E1(1,2)*E4(2,2)*E4(1,3)-2*E1(1,2)*E4(1,2)*E4(2,3)+2*E1(3,1)*E4(2,1)*E4(3,3)+2*E1(2,1)*E4(3,1)*E4(3,3)+2*E1(3,3)*E4(2,3)*E4(3,3)+2*E1(3,2)*E4(2,2)*E4(3,3)-2*E1(3,2)*E4(3,2)*E4(2,3)-2*E1(3,1)*E4(3,1)*E4(2,3)+E1(2,3)*E4(1,3)^2+E1(2,3)*E4(2,2)^2+E1(2,3)*E4(2,1)^2-E1(2,3)*E4(1,1)^2-E1(2,3)*E4(3,2)^2-E1(2,3)*E4(1,2)^2-E1(2,3)*E4(3,1)^2+E1(2,3)*E4(3,3)^2+2*E1(3,3)*E4(2,1)*E4(3,1)+2*E1(1,1)*E4(2,1)*E4(1,3)+2*E1(2,2)*E4(3,2)*E4(3,3)+2*E1(3,3)*E4(3,2)*E4(2,2)+2*E1(1,3)*E4(1,3)*E4(2,3)+2*E1(1,3)*E4(1,2)*E4(2,2)+2*E1(1,3)*E4(1,1)*E4(2,1);
Mcoefs(170) = E1(3,3)*E4(3,1)^2+E1(3,3)*E4(3,2)^2+E1(3,3)*E4(2,3)^2+3*E1(3,3)*E4(3,3)^2+E1(3,3)*E4(1,3)^2-E1(3,3)*E4(2,2)^2-E1(3,3)*E4(2,1)^2-E1(3,3)*E4(1,2)^2+2*E1(3,2)*E4(3,2)*E4(3,3)+2*E1(2,3)*E4(2,3)*E4(3,3)+2*E1(2,2)*E4(3,2)*E4(2,3)+2*E1(1,3)*E4(1,1)*E4(3,1)+2*E1(3,2)*E4(2,2)*E4(2,3)+2*E1(3,1)*E4(3,1)*E4(3,3)-E1(3,3)*E4(1,1)^2-2*E1(2,2)*E4(2,2)*E4(3,3)+2*E1(3,1)*E4(1,1)*E4(1,3)+2*E1(1,2)*E4(3,2)*E4(1,3)-2*E1(1,1)*E4(1,1)*E4(3,3)-2*E1(2,1)*E4(2,1)*E4(3,3)+2*E1(3,2)*E4(1,2)*E4(1,3)+2*E1(2,3)*E4(2,1)*E4(3,1)-2*E1(1,2)*E4(1,2)*E4(3,3)+2*E1(2,3)*E4(3,2)*E4(2,2)+2*E1(3,1)*E4(2,1)*E4(2,3)+2*E1(2,1)*E4(3,1)*E4(2,3)+2*E1(1,1)*E4(3,1)*E4(1,3)+2*E1(1,3)*E4(1,3)*E4(3,3)+2*E1(1,3)*E4(1,2)*E4(3,2);
Mcoefs(171) = -E2(2,1)*E4(1,2)*E4(3,3)-E2(1,2)*E4(2,1)*E4(3,3)-E2(3,1)*E4(2,2)*E4(1,3)-E2(1,3)*E4(3,1)*E4(2,2)-E2(2,2)*E4(3,1)*E4(1,3)+E2(2,3)*E4(3,1)*E4(1,2)+E2(1,2)*E4(3,1)*E4(2,3)+E2(2,1)*E4(3,2)*E4(1,3)+E2(2,2)*E4(1,1)*E4(3,3)+E2(1,3)*E4(2,1)*E4(3,2)+E2(3,3)*E4(1,1)*E4(2,2)+E2(3,2)*E4(2,1)*E4(1,3)+E2(3,1)*E4(1,2)*E4(2,3)+E2(1,1)*E4(2,2)*E4(3,3)-E2(3,3)*E4(2,1)*E4(1,2)-E2(1,1)*E4(3,2)*E4(2,3)-E2(2,3)*E4(1,1)*E4(3,2)-E2(3,2)*E4(1,1)*E4(2,3);
Mcoefs(172) = -E2(1,1)*E4(3,3)^2-E2(1,1)*E4(3,2)^2-E2(1,1)*E4(2,3)^2+E2(1,1)*E4(1,2)^2+E2(1,1)*E4(3,1)^2+E2(1,1)*E4(2,1)^2-E2(1,1)*E4(2,2)^2+3*E2(1,1)*E4(1,1)^2+E2(1,1)*E4(1,3)^2+2*E2(1,3)*E4(1,1)*E4(1,3)+2*E2(1,2)*E4(1,1)*E4(1,2)-2*E2(3,3)*E4(1,1)*E4(3,3)-2*E2(3,2)*E4(1,1)*E4(3,2)+2*E2(3,3)*E4(3,1)*E4(1,3)-2*E2(2,2)*E4(1,1)*E4(2,2)+2*E2(1,2)*E4(3,1)*E4(3,2)+2*E2(1,2)*E4(2,1)*E4(2,2)+2*E2(2,1)*E4(1,3)*E4(2,3)+2*E2(2,1)*E4(1,2)*E4(2,2)+2*E2(2,1)*E4(1,1)*E4(2,1)+2*E2(1,3)*E4(2,1)*E4(2,3)+2*E2(2,2)*E4(2,1)*E4(1,2)-2*E2(2,3)*E4(1,1)*E4(2,3)+2*E2(2,3)*E4(2,1)*E4(1,3)+2*E2(3,1)*E4(1,2)*E4(3,2)+2*E2(3,1)*E4(1,3)*E4(3,3)+2*E2(3,1)*E4(1,1)*E4(3,1)+2*E2(1,3)*E4(3,1)*E4(3,3)+2*E2(3,2)*E4(3,1)*E4(1,2);
Mcoefs(173) = E2(2,1)*E4(1,1)^2+E2(2,1)*E4(2,3)^2+3*E2(2,1)*E4(2,1)^2-E2(2,1)*E4(3,2)^2-E2(2,1)*E4(1,2)^2-E2(2,1)*E4(3,3)^2-E2(2,1)*E4(1,3)^2+E2(2,1)*E4(3,1)^2+E2(2,1)*E4(2,2)^2+2*E2(3,1)*E4(3,2)*E4(2,2)+2*E2(3,1)*E4(2,3)*E4(3,3)+2*E2(1,1)*E4(1,3)*E4(2,3)+2*E2(1,1)*E4(1,2)*E4(2,2)+2*E2(2,3)*E4(1,1)*E4(1,3)+2*E2(1,3)*E4(1,1)*E4(2,3)-2*E2(1,2)*E4(2,1)*E4(1,2)+2*E2(2,2)*E4(1,1)*E4(1,2)+2*E2(1,2)*E4(1,1)*E4(2,2)+2*E2(1,1)*E4(1,1)*E4(2,1)-2*E2(3,2)*E4(2,1)*E4(3,2)-2*E2(1,3)*E4(2,1)*E4(1,3)+2*E2(2,2)*E4(2,1)*E4(2,2)+2*E2(2,3)*E4(2,1)*E4(2,3)+2*E2(3,3)*E4(3,1)*E4(2,3)-2*E2(3,3)*E4(2,1)*E4(3,3)+2*E2(2,3)*E4(3,1)*E4(3,3)+2*E2(3,1)*E4(2,1)*E4(3,1)+2*E2(2,2)*E4(3,1)*E4(3,2)+2*E2(3,2)*E4(3,1)*E4(2,2);
Mcoefs(174) = E2(3,1)*E4(1,1)^2-E2(3,1)*E4(1,3)^2-E2(3,1)*E4(2,2)^2-E2(3,1)*E4(2,3)^2-E2(3,1)*E4(1,2)^2+3*E2(3,1)*E4(3,1)^2+E2(3,1)*E4(3,2)^2+E2(3,1)*E4(3,3)^2+E2(3,1)*E4(2,1)^2+2*E2(3,2)*E4(1,1)*E4(1,2)-2*E2(2,3)*E4(3,1)*E4(2,3)+2*E2(2,1)*E4(2,3)*E4(3,3)+2*E2(3,2)*E4(2,1)*E4(2,2)+2*E2(2,1)*E4(2,1)*E4(3,1)+2*E2(2,2)*E4(2,1)*E4(3,2)+2*E2(3,3)*E4(2,1)*E4(2,3)+2*E2(2,3)*E4(2,1)*E4(3,3)-2*E2(2,2)*E4(3,1)*E4(2,2)+2*E2(1,1)*E4(1,3)*E4(3,3)+2*E2(1,3)*E4(1,1)*E4(3,3)+2*E2(2,1)*E4(3,2)*E4(2,2)+2*E2(3,2)*E4(3,1)*E4(3,2)+2*E2(3,3)*E4(1,1)*E4(1,3)+2*E2(1,1)*E4(1,1)*E4(3,1)+2*E2(1,2)*E4(1,1)*E4(3,2)-2*E2(1,3)*E4(3,1)*E4(1,3)-2*E2(1,2)*E4(3,1)*E4(1,2)+2*E2(3,3)*E4(3,1)*E4(3,3)+2*E2(1,1)*E4(1,2)*E4(3,2);
Mcoefs(175) = E2(1,2)*E4(1,1)^2-E2(1,2)*E4(3,3)^2-E2(1,2)*E4(3,1)^2-E2(1,2)*E4(2,3)^2-E2(1,2)*E4(2,1)^2+E2(1,2)*E4(2,2)^2+3*E2(1,2)*E4(1,2)^2+E2(1,2)*E4(3,2)^2+E2(1,2)*E4(1,3)^2+2*E2(3,2)*E4(1,2)*E4(3,2)+2*E2(3,2)*E4(1,1)*E4(3,1)+2*E2(3,2)*E4(1,3)*E4(3,3)+2*E2(3,1)*E4(1,1)*E4(3,2)-2*E2(3,1)*E4(3,1)*E4(1,2)+2*E2(1,1)*E4(3,1)*E4(3,2)-2*E2(2,3)*E4(1,2)*E4(2,3)-2*E2(2,1)*E4(2,1)*E4(1,2)-2*E2(3,3)*E4(1,2)*E4(3,3)+2*E2(2,2)*E4(1,1)*E4(2,1)+2*E2(2,2)*E4(1,3)*E4(2,3)+2*E2(1,3)*E4(1,2)*E4(1,3)+2*E2(1,3)*E4(2,2)*E4(2,3)+2*E2(2,2)*E4(1,2)*E4(2,2)+2*E2(2,1)*E4(1,1)*E4(2,2)+2*E2(1,1)*E4(1,1)*E4(1,2)+2*E2(1,1)*E4(2,1)*E4(2,2)+2*E2(2,3)*E4(2,2)*E4(1,3)+2*E2(3,3)*E4(3,2)*E4(1,3)+2*E2(1,3)*E4(3,2)*E4(3,3);
Mcoefs(176) = 3*E2(2,2)*E4(2,2)^2-E2(2,2)*E4(3,3)^2-E2(2,2)*E4(3,1)^2-E2(2,2)*E4(1,3)^2-E2(2,2)*E4(1,1)^2+E2(2,2)*E4(2,1)^2+E2(2,2)*E4(2,3)^2+E2(2,2)*E4(3,2)^2+E2(2,2)*E4(1,2)^2-2*E2(1,1)*E4(1,1)*E4(2,2)+2*E2(3,2)*E4(2,3)*E4(3,3)+2*E2(1,2)*E4(1,2)*E4(2,2)+2*E2(1,2)*E4(1,3)*E4(2,3)+2*E2(2,3)*E4(2,2)*E4(2,3)+2*E2(3,2)*E4(2,1)*E4(3,1)+2*E2(2,1)*E4(2,1)*E4(2,2)+2*E2(3,1)*E4(2,1)*E4(3,2)+2*E2(2,1)*E4(3,1)*E4(3,2)+2*E2(3,3)*E4(3,2)*E4(2,3)-2*E2(3,3)*E4(2,2)*E4(3,3)-2*E2(3,1)*E4(3,1)*E4(2,2)-2*E2(1,3)*E4(2,2)*E4(1,3)+2*E2(2,3)*E4(3,2)*E4(3,3)+2*E2(3,2)*E4(3,2)*E4(2,2)+2*E2(1,2)*E4(1,1)*E4(2,1)+2*E2(1,3)*E4(1,2)*E4(2,3)+2*E2(2,1)*E4(1,1)*E4(1,2)+2*E2(1,1)*E4(2,1)*E4(1,2)+2*E2(2,3)*E4(1,2)*E4(1,3);
Mcoefs(177) = 2*E2(2,2)*E4(2,1)*E4(3,1)+2*E2(2,2)*E4(2,3)*E4(3,3)-2*E2(2,3)*E4(3,2)*E4(2,3)+2*E2(3,3)*E4(2,2)*E4(2,3)-2*E2(1,1)*E4(1,1)*E4(3,2)+2*E2(2,3)*E4(2,2)*E4(3,3)+2*E2(1,3)*E4(1,2)*E4(3,3)-2*E2(1,3)*E4(3,2)*E4(1,3)+2*E2(3,3)*E4(1,2)*E4(1,3)+2*E2(2,2)*E4(3,2)*E4(2,2)+2*E2(3,1)*E4(1,1)*E4(1,2)+2*E2(3,3)*E4(3,2)*E4(3,3)+2*E2(2,1)*E4(3,1)*E4(2,2)+2*E2(3,1)*E4(2,1)*E4(2,2)-E2(3,2)*E4(2,1)^2-E2(3,2)*E4(1,3)^2+E2(3,2)*E4(2,2)^2-E2(3,2)*E4(2,3)^2+E2(3,2)*E4(1,2)^2-E2(3,2)*E4(1,1)^2+E2(3,2)*E4(3,1)^2+E2(3,2)*E4(3,3)^2+3*E2(3,2)*E4(3,2)^2+2*E2(3,1)*E4(3,1)*E4(3,2)+2*E2(1,2)*E4(1,3)*E4(3,3)+2*E2(1,2)*E4(1,1)*E4(3,1)-2*E2(2,1)*E4(2,1)*E4(3,2)+2*E2(1,1)*E4(3,1)*E4(1,2)+2*E2(1,2)*E4(1,2)*E4(3,2);
Mcoefs(178) = 2*E2(1,2)*E4(1,2)*E4(1,3)+2*E2(2,3)*E4(1,2)*E4(2,2)+2*E2(2,3)*E4(1,3)*E4(2,3)-2*E2(2,2)*E4(2,2)*E4(1,3)+2*E2(1,2)*E4(3,2)*E4(3,3)+2*E2(1,1)*E4(1,1)*E4(1,3)+2*E2(3,1)*E4(1,1)*E4(3,3)+2*E2(1,1)*E4(3,1)*E4(3,3)+2*E2(1,2)*E4(2,2)*E4(2,3)+2*E2(2,2)*E4(1,2)*E4(2,3)-2*E2(2,1)*E4(2,1)*E4(1,3)-2*E2(3,2)*E4(3,2)*E4(1,3)+2*E2(1,1)*E4(2,1)*E4(2,3)+2*E2(2,1)*E4(1,1)*E4(2,3)-2*E2(3,1)*E4(3,1)*E4(1,3)+2*E2(3,3)*E4(1,1)*E4(3,1)+2*E2(3,2)*E4(1,2)*E4(3,3)+2*E2(2,3)*E4(1,1)*E4(2,1)+2*E2(3,3)*E4(1,3)*E4(3,3)+2*E2(3,3)*E4(1,2)*E4(3,2)+E2(1,3)*E4(2,3)^2+E2(1,3)*E4(3,3)^2+E2(1,3)*E4(1,1)^2+E2(1,3)*E4(1,2)^2-E2(1,3)*E4(2,2)^2+3*E2(1,3)*E4(1,3)^2-E2(1,3)*E4(2,1)^2-E2(1,3)*E4(3,2)^2-E2(1,3)*E4(3,1)^2;
Mcoefs(179) = 2*E2(2,1)*E4(1,1)*E4(1,3)+2*E2(1,3)*E4(1,1)*E4(2,1)-2*E2(3,2)*E4(3,2)*E4(2,3)+2*E2(1,3)*E4(1,3)*E4(2,3)-E2(2,3)*E4(1,1)^2-E2(2,3)*E4(1,2)^2-E2(2,3)*E4(3,2)^2-E2(2,3)*E4(3,1)^2+E2(2,3)*E4(3,3)^2+E2(2,3)*E4(1,3)^2+E2(2,3)*E4(2,1)^2+E2(2,3)*E4(2,2)^2+3*E2(2,3)*E4(2,3)^2+2*E2(2,1)*E4(2,1)*E4(2,3)+2*E2(2,2)*E4(1,2)*E4(1,3)+2*E2(1,3)*E4(1,2)*E4(2,2)-2*E2(1,1)*E4(1,1)*E4(2,3)-2*E2(1,2)*E4(1,2)*E4(2,3)-2*E2(3,1)*E4(3,1)*E4(2,3)+2*E2(2,2)*E4(3,2)*E4(3,3)+2*E2(1,2)*E4(2,2)*E4(1,3)+2*E2(2,2)*E4(2,2)*E4(2,3)+2*E2(3,2)*E4(2,2)*E4(3,3)+2*E2(1,1)*E4(2,1)*E4(1,3)+2*E2(3,3)*E4(3,2)*E4(2,2)+2*E2(3,3)*E4(2,1)*E4(3,1)+2*E2(2,1)*E4(3,1)*E4(3,3)+2*E2(3,1)*E4(2,1)*E4(3,3)+2*E2(3,3)*E4(2,3)*E4(3,3);
Mcoefs(180) = -E2(3,3)*E4(2,1)^2+E2(3,3)*E4(3,1)^2+3*E2(3,3)*E4(3,3)^2+E2(3,3)*E4(3,2)^2+2*E2(3,1)*E4(3,1)*E4(3,3)+2*E2(3,2)*E4(3,2)*E4(3,3)+2*E2(2,2)*E4(3,2)*E4(2,3)+2*E2(1,3)*E4(1,3)*E4(3,3)-2*E2(1,1)*E4(1,1)*E4(3,3)+2*E2(3,2)*E4(2,2)*E4(2,3)+2*E2(3,1)*E4(2,1)*E4(2,3)+2*E2(1,3)*E4(1,1)*E4(3,1)+2*E2(1,1)*E4(3,1)*E4(1,3)+2*E2(2,3)*E4(2,1)*E4(3,1)+2*E2(2,3)*E4(2,3)*E4(3,3)+2*E2(1,2)*E4(3,2)*E4(1,3)+2*E2(3,2)*E4(1,2)*E4(1,3)+2*E2(3,1)*E4(1,1)*E4(1,3)+2*E2(1,3)*E4(1,2)*E4(3,2)+2*E2(2,1)*E4(3,1)*E4(2,3)-2*E2(2,1)*E4(2,1)*E4(3,3)-2*E2(2,2)*E4(2,2)*E4(3,3)-2*E2(1,2)*E4(1,2)*E4(3,3)+2*E2(2,3)*E4(3,2)*E4(2,2)-E2(3,3)*E4(1,2)^2-E2(3,3)*E4(2,2)^2-E2(3,3)*E4(1,1)^2+E2(3,3)*E4(1,3)^2+E2(3,3)*E4(2,3)^2;
Mcoefs(181) = E3(2,2)*E4(1,1)*E4(3,3)-E3(2,2)*E4(3,1)*E4(1,3)+E3(3,2)*E4(2,1)*E4(1,3)-E3(3,2)*E4(1,1)*E4(2,3)-E3(1,2)*E4(2,1)*E4(3,3)+E3(2,1)*E4(3,2)*E4(1,3)-E3(1,3)*E4(3,1)*E4(2,2)+E3(1,2)*E4(3,1)*E4(2,3)+E3(1,1)*E4(2,2)*E4(3,3)-E3(1,1)*E4(3,2)*E4(2,3)+E3(2,3)*E4(3,1)*E4(1,2)+E3(1,3)*E4(2,1)*E4(3,2)-E3(2,3)*E4(1,1)*E4(3,2)-E3(2,1)*E4(1,2)*E4(3,3)-E3(3,1)*E4(2,2)*E4(1,3)+E3(3,3)*E4(1,1)*E4(2,2)-E3(3,3)*E4(2,1)*E4(1,2)+E3(3,1)*E4(1,2)*E4(2,3);
Mcoefs(182) = E3(1,1)*E4(1,3)^2+2*E3(2,3)*E4(2,1)*E4(1,3)+2*E3(1,3)*E4(2,1)*E4(2,3)+2*E3(2,2)*E4(2,1)*E4(1,2)+2*E3(1,2)*E4(2,1)*E4(2,2)+2*E3(2,1)*E4(1,3)*E4(2,3)+2*E3(2,1)*E4(1,2)*E4(2,2)+2*E3(2,1)*E4(1,1)*E4(2,1)+E3(1,1)*E4(3,1)^2+2*E3(1,3)*E4(1,1)*E4(1,3)+2*E3(1,2)*E4(1,1)*E4(1,2)-2*E3(3,3)*E4(1,1)*E4(3,3)-2*E3(3,2)*E4(1,1)*E4(3,2)+E3(1,1)*E4(2,1)^2-E3(1,1)*E4(2,3)^2+2*E3(3,3)*E4(3,1)*E4(1,3)+2*E3(1,3)*E4(3,1)*E4(3,3)+2*E3(3,2)*E4(3,1)*E4(1,2)+2*E3(1,2)*E4(3,1)*E4(3,2)+2*E3(3,1)*E4(1,1)*E4(3,1)+2*E3(3,1)*E4(1,3)*E4(3,3)+2*E3(3,1)*E4(1,2)*E4(3,2)+E3(1,1)*E4(1,2)^2-2*E3(2,3)*E4(1,1)*E4(2,3)-E3(1,1)*E4(2,2)^2+3*E3(1,1)*E4(1,1)^2-E3(1,1)*E4(3,3)^2-E3(1,1)*E4(3,2)^2-2*E3(2,2)*E4(1,1)*E4(2,2);
Mcoefs(183) = E3(2,1)*E4(2,2)^2+E3(2,1)*E4(3,1)^2-E3(2,1)*E4(3,3)^2+E3(2,1)*E4(1,1)^2+2*E3(3,1)*E4(2,1)*E4(3,1)-2*E3(3,3)*E4(2,1)*E4(3,3)+2*E3(3,2)*E4(3,1)*E4(2,2)+2*E3(3,3)*E4(3,1)*E4(2,3)+2*E3(2,3)*E4(3,1)*E4(3,3)-2*E3(3,2)*E4(2,1)*E4(3,2)+2*E3(2,3)*E4(2,1)*E4(2,3)+2*E3(2,2)*E4(2,1)*E4(2,2)-2*E3(1,3)*E4(2,1)*E4(1,3)+E3(2,1)*E4(2,3)^2+2*E3(2,2)*E4(3,1)*E4(3,2)+2*E3(2,3)*E4(1,1)*E4(1,3)+2*E3(1,1)*E4(1,2)*E4(2,2)+2*E3(1,1)*E4(1,3)*E4(2,3)+2*E3(3,1)*E4(2,3)*E4(3,3)+2*E3(3,1)*E4(3,2)*E4(2,2)+2*E3(1,2)*E4(1,1)*E4(2,2)+2*E3(2,2)*E4(1,1)*E4(1,2)-2*E3(1,2)*E4(2,1)*E4(1,2)+2*E3(1,3)*E4(1,1)*E4(2,3)-E3(2,1)*E4(3,2)^2+2*E3(1,1)*E4(1,1)*E4(2,1)+3*E3(2,1)*E4(2,1)^2-E3(2,1)*E4(1,3)^2-E3(2,1)*E4(1,2)^2;
Mcoefs(184) = -E3(3,1)*E4(1,3)^2-E3(3,1)*E4(2,3)^2-E3(3,1)*E4(2,2)^2+2*E3(1,1)*E4(1,3)*E4(3,3)+3*E3(3,1)*E4(3,1)^2+E3(3,1)*E4(1,1)^2-E3(3,1)*E4(1,2)^2+2*E3(2,1)*E4(3,2)*E4(2,2)+2*E3(2,1)*E4(2,1)*E4(3,1)+2*E3(3,2)*E4(3,1)*E4(3,2)-2*E3(2,2)*E4(3,1)*E4(2,2)-2*E3(1,2)*E4(3,1)*E4(1,2)+E3(3,1)*E4(2,1)^2+2*E3(3,3)*E4(3,1)*E4(3,3)+E3(3,1)*E4(3,2)^2+2*E3(3,2)*E4(1,1)*E4(1,2)+2*E3(3,3)*E4(1,1)*E4(1,3)+2*E3(3,2)*E4(2,1)*E4(2,2)+2*E3(2,1)*E4(2,3)*E4(3,3)+2*E3(2,3)*E4(2,1)*E4(3,3)+2*E3(2,2)*E4(2,1)*E4(3,2)+E3(3,1)*E4(3,3)^2+2*E3(1,1)*E4(1,2)*E4(3,2)+2*E3(1,2)*E4(1,1)*E4(3,2)+2*E3(1,3)*E4(1,1)*E4(3,3)+2*E3(1,1)*E4(1,1)*E4(3,1)-2*E3(1,3)*E4(3,1)*E4(1,3)-2*E3(2,3)*E4(3,1)*E4(2,3)+2*E3(3,3)*E4(2,1)*E4(2,3);
Mcoefs(185) = -2*E3(2,1)*E4(2,1)*E4(1,2)-2*E3(3,3)*E4(1,2)*E4(3,3)+2*E3(2,2)*E4(1,1)*E4(2,1)+2*E3(2,2)*E4(1,3)*E4(2,3)-E3(1,2)*E4(2,1)^2+3*E3(1,2)*E4(1,2)^2+E3(1,2)*E4(3,2)^2+E3(1,2)*E4(2,2)^2+E3(1,2)*E4(1,1)^2-2*E3(2,3)*E4(1,2)*E4(2,3)+2*E3(1,1)*E4(3,1)*E4(3,2)-E3(1,2)*E4(3,3)^2-E3(1,2)*E4(2,3)^2+2*E3(3,2)*E4(1,1)*E4(3,1)+2*E3(3,2)*E4(1,3)*E4(3,3)+2*E3(3,1)*E4(1,1)*E4(3,2)+2*E3(1,3)*E4(2,2)*E4(2,3)+2*E3(2,2)*E4(1,2)*E4(2,2)+2*E3(1,1)*E4(1,1)*E4(1,2)+2*E3(1,3)*E4(1,2)*E4(1,3)+2*E3(1,1)*E4(2,1)*E4(2,2)+2*E3(2,3)*E4(2,2)*E4(1,3)+2*E3(3,3)*E4(3,2)*E4(1,3)+2*E3(1,3)*E4(3,2)*E4(3,3)+2*E3(3,2)*E4(1,2)*E4(3,2)+E3(1,2)*E4(1,3)^2-2*E3(3,1)*E4(3,1)*E4(1,2)+2*E3(2,1)*E4(1,1)*E4(2,2)-E3(1,2)*E4(3,1)^2;
Mcoefs(186) = E3(2,2)*E4(2,3)^2+2*E3(1,3)*E4(1,2)*E4(2,3)+2*E3(1,2)*E4(1,1)*E4(2,1)+2*E3(2,1)*E4(1,1)*E4(1,2)+2*E3(1,1)*E4(2,1)*E4(1,2)+2*E3(2,3)*E4(1,2)*E4(1,3)+2*E3(1,2)*E4(1,2)*E4(2,2)+2*E3(1,2)*E4(1,3)*E4(2,3)+2*E3(2,3)*E4(2,2)*E4(2,3)-E3(2,2)*E4(3,3)^2+E3(2,2)*E4(2,1)^2+E3(2,2)*E4(3,2)^2+2*E3(3,2)*E4(2,3)*E4(3,3)-E3(2,2)*E4(1,3)^2-E3(2,2)*E4(3,1)^2-2*E3(3,3)*E4(2,2)*E4(3,3)-2*E3(3,1)*E4(3,1)*E4(2,2)-2*E3(1,3)*E4(2,2)*E4(1,3)-2*E3(1,1)*E4(1,1)*E4(2,2)+2*E3(2,3)*E4(3,2)*E4(3,3)+2*E3(3,2)*E4(3,2)*E4(2,2)+2*E3(2,1)*E4(2,1)*E4(2,2)+E3(2,2)*E4(1,2)^2+2*E3(3,2)*E4(2,1)*E4(3,1)+2*E3(3,1)*E4(2,1)*E4(3,2)+2*E3(2,1)*E4(3,1)*E4(3,2)+2*E3(3,3)*E4(3,2)*E4(2,3)+3*E3(2,2)*E4(2,2)^2-E3(2,2)*E4(1,1)^2;
Mcoefs(187) = E3(3,2)*E4(2,2)^2+E3(3,2)*E4(1,2)^2-E3(3,2)*E4(1,3)^2+2*E3(2,2)*E4(2,1)*E4(3,1)-E3(3,2)*E4(1,1)^2-E3(3,2)*E4(2,3)^2-E3(3,2)*E4(2,1)^2+2*E3(1,2)*E4(1,1)*E4(3,1)+2*E3(1,2)*E4(1,2)*E4(3,2)+2*E3(3,3)*E4(3,2)*E4(3,3)+2*E3(3,1)*E4(3,1)*E4(3,2)+2*E3(1,3)*E4(1,2)*E4(3,3)+2*E3(3,3)*E4(1,2)*E4(1,3)+2*E3(3,1)*E4(1,1)*E4(1,2)-2*E3(2,1)*E4(2,1)*E4(3,2)-2*E3(2,3)*E4(3,2)*E4(2,3)-2*E3(1,1)*E4(1,1)*E4(3,2)-2*E3(1,3)*E4(3,2)*E4(1,3)+2*E3(3,1)*E4(2,1)*E4(2,2)+2*E3(2,2)*E4(3,2)*E4(2,2)+2*E3(2,3)*E4(2,2)*E4(3,3)+2*E3(3,3)*E4(2,2)*E4(2,3)+3*E3(3,2)*E4(3,2)^2+2*E3(2,1)*E4(3,1)*E4(2,2)+E3(3,2)*E4(3,1)^2+E3(3,2)*E4(3,3)^2+2*E3(1,1)*E4(3,1)*E4(1,2)+2*E3(1,2)*E4(1,3)*E4(3,3)+2*E3(2,2)*E4(2,3)*E4(3,3);
Mcoefs(188) = -E3(1,3)*E4(3,1)^2-E3(1,3)*E4(2,2)^2+E3(1,3)*E4(1,1)^2+E3(1,3)*E4(1,2)^2+2*E3(1,1)*E4(3,1)*E4(3,3)+2*E3(3,1)*E4(1,1)*E4(3,3)+2*E3(1,2)*E4(3,2)*E4(3,3)+2*E3(3,3)*E4(1,3)*E4(3,3)-E3(1,3)*E4(3,2)^2+E3(1,3)*E4(2,3)^2+E3(1,3)*E4(3,3)^2+2*E3(1,1)*E4(2,1)*E4(2,3)+2*E3(2,3)*E4(1,1)*E4(2,1)+2*E3(2,3)*E4(1,2)*E4(2,2)+2*E3(1,2)*E4(1,2)*E4(1,3)+2*E3(1,1)*E4(1,1)*E4(1,3)+2*E3(1,2)*E4(2,2)*E4(2,3)+2*E3(2,2)*E4(1,2)*E4(2,3)+2*E3(2,1)*E4(1,1)*E4(2,3)+2*E3(3,3)*E4(1,1)*E4(3,1)+2*E3(3,3)*E4(1,2)*E4(3,2)+2*E3(3,2)*E4(1,2)*E4(3,3)-2*E3(3,1)*E4(3,1)*E4(1,3)-2*E3(3,2)*E4(3,2)*E4(1,3)-2*E3(2,1)*E4(2,1)*E4(1,3)-E3(1,3)*E4(2,1)^2+2*E3(2,3)*E4(1,3)*E4(2,3)+3*E3(1,3)*E4(1,3)^2-2*E3(2,2)*E4(2,2)*E4(1,3);
Mcoefs(189) = -E3(2,3)*E4(1,1)^2+3*E3(2,3)*E4(2,3)^2+2*E3(3,3)*E4(2,3)*E4(3,3)+2*E3(2,1)*E4(3,1)*E4(3,3)+2*E3(3,1)*E4(2,1)*E4(3,3)-E3(2,3)*E4(3,2)^2-E3(2,3)*E4(1,2)^2-E3(2,3)*E4(3,1)^2+2*E3(2,2)*E4(2,2)*E4(2,3)+E3(2,3)*E4(2,2)^2+E3(2,3)*E4(2,1)^2+2*E3(2,1)*E4(1,1)*E4(1,3)+2*E3(1,1)*E4(2,1)*E4(1,3)+2*E3(1,2)*E4(2,2)*E4(1,3)+2*E3(2,2)*E4(1,2)*E4(1,3)+2*E3(2,1)*E4(2,1)*E4(2,3)+E3(2,3)*E4(3,3)^2-2*E3(1,1)*E4(1,1)*E4(2,3)-2*E3(1,2)*E4(1,2)*E4(2,3)-2*E3(3,1)*E4(3,1)*E4(2,3)-2*E3(3,2)*E4(3,2)*E4(2,3)+2*E3(3,3)*E4(2,1)*E4(3,1)+2*E3(3,3)*E4(3,2)*E4(2,2)+2*E3(2,2)*E4(3,2)*E4(3,3)+2*E3(3,2)*E4(2,2)*E4(3,3)+2*E3(1,3)*E4(1,1)*E4(2,1)+2*E3(1,3)*E4(1,2)*E4(2,2)+2*E3(1,3)*E4(1,3)*E4(2,3)+E3(2,3)*E4(1,3)^2;
Mcoefs(190) = 3*E3(3,3)*E4(3,3)^2-E3(3,3)*E4(1,1)^2-E3(3,3)*E4(2,1)^2-E3(3,3)*E4(1,2)^2+2*E3(2,3)*E4(2,3)*E4(3,3)+2*E3(2,3)*E4(3,2)*E4(2,2)+2*E3(2,1)*E4(3,1)*E4(2,3)-E3(3,3)*E4(2,2)^2+2*E3(1,2)*E4(3,2)*E4(1,3)+2*E3(1,3)*E4(1,1)*E4(3,1)+E3(3,3)*E4(1,3)^2+2*E3(1,3)*E4(1,2)*E4(3,2)+2*E3(1,3)*E4(1,3)*E4(3,3)+2*E3(3,1)*E4(3,1)*E4(3,3)+2*E3(3,2)*E4(3,2)*E4(3,3)-2*E3(1,1)*E4(1,1)*E4(3,3)-2*E3(1,2)*E4(1,2)*E4(3,3)-2*E3(2,1)*E4(2,1)*E4(3,3)-2*E3(2,2)*E4(2,2)*E4(3,3)+2*E3(3,1)*E4(1,1)*E4(1,3)+2*E3(3,2)*E4(1,2)*E4(1,3)+2*E3(1,1)*E4(3,1)*E4(1,3)+2*E3(2,3)*E4(2,1)*E4(3,1)+2*E3(3,1)*E4(2,1)*E4(2,3)+2*E3(2,2)*E4(3,2)*E4(2,3)+2*E3(3,2)*E4(2,2)*E4(2,3)+E3(3,3)*E4(2,3)^2+E3(3,3)*E4(3,2)^2+E3(3,3)*E4(3,1)^2;
Mcoefs(191) = E4(2,1)*E4(3,2)*E4(1,3)-E4(2,1)*E4(1,2)*E4(3,3)-E4(1,1)*E4(3,2)*E4(2,3)+E4(1,1)*E4(2,2)*E4(3,3)+E4(3,1)*E4(1,2)*E4(2,3)-E4(3,1)*E4(2,2)*E4(1,3);
Mcoefs(192) = 2*E4(3,1)*E4(1,3)*E4(3,3)+2*E4(2,1)*E4(1,3)*E4(2,3)+2*E4(2,1)*E4(1,2)*E4(2,2)+2*E4(3,1)*E4(1,2)*E4(3,2)-E4(1,1)*E4(2,2)^2+E4(1,1)*E4(2,1)^2+E4(1,1)*E4(1,2)^2+E4(1,1)*E4(1,3)^2-E4(1,1)*E4(3,3)^2+E4(1,1)*E4(3,1)^2-E4(1,1)*E4(3,2)^2-E4(1,1)*E4(2,3)^2+E4(1,1)^3;
Mcoefs(193) = 2*E4(1,1)*E4(1,2)*E4(2,2)+2*E4(1,1)*E4(1,3)*E4(2,3)+E4(2,1)^3+E4(2,1)*E4(2,2)^2+E4(2,1)*E4(2,3)^2+E4(1,1)^2*E4(2,1)+2*E4(3,1)*E4(3,2)*E4(2,2)+2*E4(3,1)*E4(2,3)*E4(3,3)-E4(2,1)*E4(1,2)^2+E4(2,1)*E4(3,1)^2-E4(2,1)*E4(3,2)^2-E4(2,1)*E4(1,3)^2-E4(2,1)*E4(3,3)^2;
Mcoefs(194) = 2*E4(1,1)*E4(1,2)*E4(3,2)+E4(2,1)^2*E4(3,1)+E4(3,1)^3+2*E4(1,1)*E4(1,3)*E4(3,3)+2*E4(2,1)*E4(2,3)*E4(3,3)+2*E4(2,1)*E4(3,2)*E4(2,2)-E4(3,1)*E4(2,3)^2-E4(3,1)*E4(1,2)^2-E4(3,1)*E4(2,2)^2-E4(3,1)*E4(1,3)^2+E4(3,1)*E4(3,2)^2+E4(1,1)^2*E4(3,1)+E4(3,1)*E4(3,3)^2;
Mcoefs(195) = 2*E4(2,2)*E4(1,3)*E4(2,3)+E4(1,2)*E4(3,2)^2+2*E4(3,2)*E4(1,3)*E4(3,3)+2*E4(1,1)*E4(2,1)*E4(2,2)+E4(1,2)*E4(2,2)^2-E4(2,1)^2*E4(1,2)+E4(1,2)*E4(1,3)^2+E4(1,1)^2*E4(1,2)-E4(1,2)*E4(2,3)^2-E4(3,1)^2*E4(1,2)-E4(1,2)*E4(3,3)^2+E4(1,2)^3+2*E4(1,1)*E4(3,1)*E4(3,2);
Mcoefs(196) = 2*E4(1,2)*E4(1,3)*E4(2,3)+2*E4(2,1)*E4(3,1)*E4(3,2)+2*E4(3,2)*E4(2,3)*E4(3,3)-E4(3,1)^2*E4(2,2)-E4(2,2)*E4(3,3)^2+E4(1,2)^2*E4(2,2)+2*E4(1,1)*E4(2,1)*E4(1,2)+E4(3,2)^2*E4(2,2)-E4(2,2)*E4(1,3)^2+E4(2,1)^2*E4(2,2)+E4(2,2)*E4(2,3)^2+E4(2,2)^3-E4(1,1)^2*E4(2,2);
Mcoefs(197) = 2*E4(2,2)*E4(2,3)*E4(3,3)+E4(3,1)^2*E4(3,2)-E4(3,2)*E4(1,3)^2-E4(3,2)*E4(2,3)^2+E4(3,2)*E4(3,3)^2+E4(3,2)^3-E4(2,1)^2*E4(3,2)+E4(1,2)^2*E4(3,2)+2*E4(2,1)*E4(3,1)*E4(2,2)-E4(1,1)^2*E4(3,2)+E4(3,2)*E4(2,2)^2+2*E4(1,1)*E4(3,1)*E4(1,2)+2*E4(1,2)*E4(1,3)*E4(3,3);
Mcoefs(198) = E4(1,3)*E4(2,3)^2+2*E4(1,1)*E4(2,1)*E4(2,3)+2*E4(1,2)*E4(2,2)*E4(2,3)-E4(2,1)^2*E4(1,3)-E4(2,2)^2*E4(1,3)+2*E4(1,1)*E4(3,1)*E4(3,3)+2*E4(1,2)*E4(3,2)*E4(3,3)-E4(3,1)^2*E4(1,3)-E4(3,2)^2*E4(1,3)+E4(1,3)*E4(3,3)^2+E4(1,3)^3+E4(1,1)^2*E4(1,3)+E4(1,2)^2*E4(1,3);
Mcoefs(199) = E4(1,3)^2*E4(2,3)+E4(2,2)^2*E4(2,3)+E4(2,1)^2*E4(2,3)-E4(1,1)^2*E4(2,3)-E4(1,2)^2*E4(2,3)-E4(3,2)^2*E4(2,3)-E4(3,1)^2*E4(2,3)+E4(2,3)*E4(3,3)^2+2*E4(2,1)*E4(3,1)*E4(3,3)+2*E4(3,2)*E4(2,2)*E4(3,3)+2*E4(1,1)*E4(2,1)*E4(1,3)+2*E4(1,2)*E4(2,2)*E4(1,3)+E4(2,3)^3;
Mcoefs(200) = -E4(1,1)^2*E4(3,3)-E4(1,2)^2*E4(3,3)-E4(2,1)^2*E4(3,3)-E4(2,2)^2*E4(3,3)+2*E4(1,2)*E4(3,2)*E4(1,3)+E4(1,3)^2*E4(3,3)+E4(3,2)^2*E4(3,3)+E4(3,1)^2*E4(3,3)+2*E4(1,1)*E4(3,1)*E4(1,3)+E4(3,3)^3+E4(2,3)^2*E4(3,3)+2*E4(2,1)*E4(3,1)*E4(2,3)+2*E4(3,2)*E4(2,2)*E4(2,3);
% x^3
% y^1*x^2
% y^2*x^1
% y^3
% z^1*x^2
% z^1*y^1*x^1
% z^1*y^2
% z^2*x^1
% z^2*y^1
% z^3
% x^2
% y^1*x^1
% y^2
% z^1*x^1
% z^1*y^1
% z^2
% x^1
% y^1
% z^1
% 1
%
D1 = [Mcoefs(:,[1:4,11:13,17:18,20])];
D2 = [zeros(10,4) Mcoefs(:,[5:7,14:15,19])];
D3 = [zeros(10,7) Mcoefs(:,[8:9,16])];
D4 = [zeros(10,9) Mcoefs(:,[10])];
[v, z] = polyeig(D1,D2,D3,D4);
E = [];
for i=1:length(z)
if (isfinite(z(i)) & isreal(z(i)))
Ei = v(8,i)*E1 + v(9,i)*E2 + (v(10,i)*z(i))*E3 + v(10,i)*E4;
E = [E; Ei ];
end
end
end | MATLAB |
function GraphAB = merge2graphs(GraphA,GraphB)
% MERGE2GRAPHS  Merge two partial SfM reconstruction graphs into one.
%
%   GraphAB = merge2graphs(GraphA,GraphB) combines two reconstructions that
%   share at least one common frame. Fields used on each graph:
%     frames - row vector of frame ids covered by the graph
%     Mot    - 3x4xN camera motions (world -> camera), one per frame
%     Str    - 3xM 3D points in the graph's own world coordinate system
%     ObsIdx - per-camera track index into ObsVal; ObsIdx(c,tr)==0 means
%              track tr is not observed by camera c (stored sparse)
%     ObsVal - stack of 2D observations addressed through ObsIdx
%              (assumed rows are image x,y -- TODO confirm against caller)
%   Returns [] if the two graphs have no frame in common.
%
%   The merged graph is expressed in GraphA's world coordinates; tracks are
%   identified across graphs by exact equality of their 2D observations in
%   the common frames.
commonFrames = intersect(GraphA.frames,GraphB.frames);
[newFramesFromB,indexNewFramesFromB] = setdiff(GraphB.frames,GraphA.frames);
if isempty(commonFrames)
    fprintf('no common frames!\n');
    GraphAB = [];
    return;
end
GraphAB = GraphA;
if isempty(newFramesFromB)
    % GraphB contributes no new frames; GraphA already covers everything.
    return;
end
% add the non-overlapping frame first
firstCommonFrame = commonFrames(1);
% transform GraphB.Mot and GraphB.Str to be in the same world coordinate system of GraphA
RtBW2AW = concatenateRts(inverseRt(GraphA.Mot(:,:,GraphA.frames==firstCommonFrame)), GraphB.Mot(:,:,GraphB.frames==firstCommonFrame));
GraphB.Str = transformPtsByRt(GraphB.Str, RtBW2AW);
for i=1:length(GraphB.frames)
    GraphB.Mot(:,:,i) = concatenateRts(GraphB.Mot(:,:,i), inverseRt(RtBW2AW));
end
% B's new cameras are appended after all of A's cameras; below, camera row
% length(GraphA.frames)+j always refers to new frame indexNewFramesFromB(j).
GraphAB.frames = [GraphA.frames newFramesFromB];
GraphAB.Mot(:,:,length(GraphA.frames)+1:length(GraphAB.frames)) = GraphB.Mot(:,:,indexNewFramesFromB);
% add the new tracks
for commonFrame = commonFrames
    cameraIDA = find(GraphA.frames==commonFrame); cameraIDB = find(GraphB.frames==commonFrame);
    % 2D observations of every track seen by this common frame, in each graph
    trA = find(GraphA.ObsIdx(cameraIDA,:)~=0);
    xyA = GraphA.ObsVal(:,GraphA.ObsIdx(cameraIDA,trA));
    trB = find(GraphB.ObsIdx(cameraIDB,:)~=0);
    xyB = GraphB.ObsVal(:,GraphB.ObsIdx(cameraIDB,trB));
    % tracks with identical 2D coordinates in the common frame are the same track
    [xyCommon,iA,iB] = intersect(xyA',xyB','rows');
    xyCommon = xyCommon';
    % make the old track longer
    for i=1:size(xyCommon,2)
        idA = trA(iA(i));
        idB = trB(iB(i));
        for j=1:length(indexNewFramesFromB)
            BObsIdx = GraphB.ObsIdx(indexNewFramesFromB(j),idB);
            if BObsIdx~=0
                % append B's observation and point A's existing track at it
                GraphAB.ObsVal(:,end+1) = GraphB.ObsVal(:,BObsIdx);
                GraphAB.ObsIdx(length(GraphA.frames)+j,idA) = size(GraphAB.ObsVal,2);
            end
        end
    end
    % add the new tracks from common frame
    [xyNewFromB, iB] = setdiff(xyB',xyA','rows');
    xyNewFromB = xyNewFromB';
    for i=1:size(xyNewFromB,2)
        idB = trB(iB(i));
        % create a brand-new track: its observation in the common frame, its
        % 3D point (already in A's world coordinates), then any observations
        % it has in B's new frames
        GraphAB.ObsVal(:,end+1) = GraphB.ObsVal(:,GraphB.ObsIdx(cameraIDB,idB));
        GraphAB.ObsIdx(cameraIDA,end+1) = size(GraphAB.ObsVal,2);
        GraphAB.Str(:,end+1) = GraphB.Str(:,idB);
        for j=1:length(indexNewFramesFromB)
            BObsIdx = GraphB.ObsIdx(indexNewFramesFromB(j),idB);
            if BObsIdx~=0
                GraphAB.ObsVal(:,end+1) = GraphB.ObsVal(:,BObsIdx);
                % 'end' column is the track created just above
                GraphAB.ObsIdx(length(GraphA.frames)+j,end) = size(GraphAB.ObsVal,2);
            end
        end
    end
end
% add the new tracks only among the completely new frames
newB = false(1,length(GraphB.frames));
newB(indexNewFramesFromB) = true;
% tracks of B that are seen in none of the shared frames but in >=1 new frame
tr2add = sum(GraphB.ObsIdx(~newB,:)~=0,1)==0 & sum(GraphB.ObsIdx(newB,:)~=0,1)>0;
if any(tr2add)
    ids = full(GraphB.ObsIdx(indexNewFramesFromB,tr2add));
    curValCnt = size(GraphAB.ObsVal,2);
    nonZerosID = find(ids(:)>0);
    % bulk-append all of their observations, then remap the indices
    GraphAB.ObsVal(:,(curValCnt+1):(curValCnt+length(nonZerosID))) = GraphB.ObsVal(:,ids(nonZerosID));
    idsNew = zeros(size(ids));
    idsNew(nonZerosID) = (curValCnt+1):(curValCnt+length(nonZerosID));
    GraphAB.ObsIdx(length(GraphA.frames)+1:end,size(GraphAB.ObsIdx,2)+1:size(GraphAB.ObsIdx,2)+size(idsNew,2)) = sparse(idsNew);
    GraphAB.Str(:,size(GraphAB.ObsIdx,2)+1:size(GraphAB.ObsIdx,2)+size(idsNew,2)) = GraphB.Str(:,tr2add);
end
return;
% NOTE(review): the block below is legacy dead code (unreachable after the
% return above); kept verbatim for reference.
%{
pointCount = size(MatchPairs{1}.matches,2);
pointObservedValueCount = size(MatchPairs{1}.matches,2)*2;
pointObservedValue(:,1:pointObservedValueCount) = [[MatchPairs{1}.matches(1:5,:) MatchPairs{1}.matches(6:10,:)]; -wTimePoints * ones(1,pointObservedValueCount)];
pointObserved(1,1:pointCount)=1:pointCount;
pointObserved(2,1:pointCount)=pointCount + (1:pointCount);
previousIndex = 1:pointCount;
pointCloud(:,1:pointCount) = MatchPairs{1}.matches(3:5,:);
for frameID = 2:length(data.image)-1
    [~,iA,iB] = intersect(MatchPairs{frameID-1}.matches(6:7,:)',MatchPairs{frameID}.matches(1:2,:)','rows');
    alreadyExist = false(1,size(MatchPairs{frameID}.matches,2));
    alreadyExist(iB) = true;
    newCount = sum(~alreadyExist);
    currentIndex = zeros(1,size(MatchPairs{frameID}.matches,2));
    currentIndex(iB) = previousIndex(iA);
    currentIndex(~alreadyExist) = (pointCount+1):(pointCount+newCount);
    pointObservedValue(1:5,pointObservedValueCount+1:pointObservedValueCount+newCount+length(currentIndex)) = [MatchPairs{frameID}.matches(1:5,~alreadyExist) MatchPairs{frameID}.matches(6:10,:)];
    pointObservedValue(6,pointObservedValueCount+1:pointObservedValueCount+newCount+length(currentIndex)) = -wTimePoints;
    pointObserved(frameID ,currentIndex(~alreadyExist)) = (pointObservedValueCount+1):(pointObservedValueCount+newCount);
    pointObservedValueCount = pointObservedValueCount + newCount;
    pointObserved(frameID+1,currentIndex) = (pointObservedValueCount+1):(pointObservedValueCount+length(currentIndex));
    pointObservedValueCount = pointObservedValueCount + length(currentIndex);
    pointCloud(:,pointCount+1:pointCount+newCount) = transformRT(MatchPairs{frameID}.matches(3:5,~alreadyExist), cameraRtC2W(:,:,frameID), false);
    pointCount = pointCount + newCount;
    previousIndex = currentIndex;
end
%}
function focal_pixels = extractFocalFromEXIF(imageFileName)
% EXTRACTFOCALFROMEXIF  Estimate the focal length in pixels from EXIF data.
%
%   focal_pixels = extractFocalFromEXIF(imageFileName) reads the EXIF
%   metadata of the image and converts the recorded focal length (mm) into
%   pixels using focal_pixels = res_x * focal_mm / ccd_width_mm, where
%   res_x is the larger image dimension. Two EXIF sources are tried:
%     1. FocalLengthIn35mmFilm (e.g. iPhone): uses the 36 mm film width.
%     2. FocalLength + a make/model lookup in the CCD-width table below.
%   Returns [] when the focal length cannot be determined.
%
% Add iphone support
% Refer to http://phototour.cs.washington.edu/focal.html
% list extracted from Bundler by Noah Snavely
focal_pixels = [];
ccd_widths = {...
'Asahi Optical Co.,Ltd. PENTAX Optio330RS', 7.176;
'Canon Canon DIGITAL IXUS 400', 7.176;
'Canon Canon DIGITAL IXUS 40', 5.76;
'Canon Canon DIGITAL IXUS 430', 7.176;
'Canon Canon DIGITAL IXUS 500', 7.176;
'Canon Canon DIGITAL IXUS 50', 5.76;
'Canon Canon DIGITAL IXUS 55', 5.76;
'Canon Canon DIGITAL IXUS 60', 5.76;
'Canon Canon DIGITAL IXUS 65', 5.76;
'Canon Canon DIGITAL IXUS 700', 7.176;
'Canon Canon DIGITAL IXUS 750', 7.176;
'Canon Canon DIGITAL IXUS 800 IS', 5.76;
'Canon Canon DIGITAL IXUS II', 5.27;
'Canon Canon EOS 10D', 22.7;
'Canon Canon EOS-1D Mark II', 28.7;
'Canon Canon EOS-1Ds Mark II', 35.95;
'Canon Canon EOS 20D', 22.5;
'Canon Canon EOS 20D', 22.5;
'Canon Canon EOS 300D DIGITAL', 22.66;
'Canon Canon EOS 30D', 22.5;
'Canon Canon EOS 350D DIGITAL', 22.2;
'Canon Canon EOS 400D DIGITAL', 22.2;
'Canon Canon EOS 40D', 22.2;
'Canon Canon EOS 5D', 35.8;
'Canon Canon EOS DIGITAL REBEL', 22.66;
'Canon Canon EOS DIGITAL REBEL XT', 22.2;
'Canon Canon EOS DIGITAL REBEL XTi', 22.2;
'Canon Canon EOS Kiss Digital', 22.66;
'Canon Canon IXY DIGITAL 600', 7.176;
'Canon Canon PowerShot A10', 5.27;
'Canon Canon PowerShot A20', 7.176;
'Canon Canon PowerShot A400', 4.54;
'Canon Canon PowerShot A40', 5.27;
'Canon Canon PowerShot A510', 5.76;
'Canon Canon PowerShot A520', 5.76;
'Canon Canon PowerShot A530', 5.76;
'Canon Canon PowerShot A60', 5.27;
'Canon Canon PowerShot A620', 7.176;
'Canon Canon PowerShot A630', 7.176;
'Canon Canon PowerShot A640', 7.176;
'Canon Canon PowerShot A700', 5.76;
'Canon Canon PowerShot A70', 5.27;
'Canon Canon PowerShot A710 IS', 5.76;
'Canon Canon PowerShot A75', 5.27;
'Canon Canon PowerShot A80', 7.176;
'Canon Canon PowerShot A85', 5.27;
'Canon Canon PowerShot A95', 7.176;
'Canon Canon PowerShot G1', 7.176;
'Canon Canon PowerShot G2', 7.176;
'Canon Canon PowerShot G3', 7.176;
'Canon Canon PowerShot G5', 7.176;
'Canon Canon PowerShot G6', 7.176;
'Canon Canon PowerShot G7', 7.176;
'Canon Canon PowerShot G9', 7.600;
'Canon Canon PowerShot Pro1', 8.8;
'Canon Canon PowerShot S110', 5.27;
'Canon Canon PowerShot S1 IS', 5.27;
'Canon Canon PowerShot S200', 5.27;
'Canon Canon PowerShot S2 IS', 5.76;
'Canon Canon PowerShot S30', 7.176;
'Canon Canon PowerShot S3 IS', 5.76;
'Canon Canon PowerShot S400', 7.176;
'Canon Canon PowerShot S40', 7.176;
'Canon Canon PowerShot S410', 7.176;
'Canon Canon PowerShot S45', 7.176;
'Canon Canon PowerShot S500', 7.176;
'Canon Canon PowerShot S50', 7.176;
'Canon Canon PowerShot S60', 7.176;
'Canon Canon PowerShot S70', 7.176;
'Canon Canon PowerShot S80', 7.176;
'Canon Canon PowerShot SD1000', 5.75;
'Canon Canon PowerShot SD100', 5.27;
'Canon Canon PowerShot SD10', 5.75;
'Canon Canon PowerShot SD110', 5.27;
'Canon Canon PowerShot SD200', 5.76;
'Canon Canon PowerShot SD300', 5.76;
'Canon Canon PowerShot SD400', 5.76;
'Canon Canon PowerShot SD450', 5.76;
'Canon Canon PowerShot SD500', 7.176;
'Canon Canon PowerShot SD550', 7.176;
'Canon Canon PowerShot SD600', 5.76;
'Canon Canon PowerShot SD630', 5.76;
'Canon Canon PowerShot SD700 IS', 5.76;
'Canon Canon PowerShot SD750', 5.75;
'Canon Canon PowerShot SD800 IS', 5.76;
'Canon EOS 300D DIGITAL', 22.66;
'Canon EOS DIGITAL REBEL', 22.66;
'Canon PowerShot A510', 5.76;
'Canon PowerShot S30', 7.176;
'CASIO COMPUTER CO.,LTD. EX-S500', 5.76;
'CASIO COMPUTER CO.,LTD. EX-Z1000', 7.716;
'CASIO COMPUTER CO.,LTD EX-Z30', 5.76;
'CASIO COMPUTER CO.,LTD. EX-Z600', 5.76;
'CASIO COMPUTER CO.,LTD. EX-Z60', 7.176;
'CASIO COMPUTER CO.,LTD EX-Z750', 7.176;
'CASIO COMPUTER CO.,LTD. EX-Z850', 7.176;
'EASTMAN KODAK COMPANY KODAK CX7330 ZOOM DIGITAL CAMERA', 5.27;
'EASTMAN KODAK COMPANY KODAK CX7530 ZOOM DIGITAL CAMERA', 5.76;
'EASTMAN KODAK COMPANY KODAK DX3900 ZOOM DIGITAL CAMERA', 7.176;
'EASTMAN KODAK COMPANY KODAK DX4900 ZOOM DIGITAL CAMERA', 7.176;
'EASTMAN KODAK COMPANY KODAK DX6340 ZOOM DIGITAL CAMERA', 5.27;
'EASTMAN KODAK COMPANY KODAK DX6490 ZOOM DIGITAL CAMERA', 5.76;
'EASTMAN KODAK COMPANY KODAK DX7630 ZOOM DIGITAL CAMERA', 7.176;
'EASTMAN KODAK COMPANY KODAK Z650 ZOOM DIGITAL CAMERA', 5.76;
'EASTMAN KODAK COMPANY KODAK Z700 ZOOM DIGITAL CAMERA', 5.76;
'EASTMAN KODAK COMPANY KODAK Z740 ZOOM DIGITAL CAMERA', 5.76;
'EASTMAN KODAK COMPANY KODAK Z740 ZOOM DIGITAL CAMERA', 5.76;
'FUJIFILM FinePix2600Zoom', 5.27;
'FUJIFILM FinePix40i', 7.600;
'FUJIFILM FinePix A310', 5.27;
'FUJIFILM FinePix A330', 5.27;
'FUJIFILM FinePix A600', 7.600;
'FUJIFILM FinePix E500', 5.76;
'FUJIFILM FinePix E510', 5.76;
'FUJIFILM FinePix E550', 7.600;
'FUJIFILM FinePix E900', 7.78;
'FUJIFILM FinePix F10', 7.600;
'FUJIFILM FinePix F30', 7.600;
'FUJIFILM FinePix F450', 5.76;
'FUJIFILM FinePix F601 ZOOM', 7.600;
'FUJIFILM FinePix S3Pro', 23.0;
'FUJIFILM FinePix S5000', 5.27;
'FUJIFILM FinePix S5200', 5.76;
'FUJIFILM FinePix S5500', 5.27;
'FUJIFILM FinePix S6500fd', 7.600;
'FUJIFILM FinePix S7000', 7.600;
'FUJIFILM FinePix Z2', 5.76;
'Hewlett-Packard hp 635 Digital Camera', 4.54;
'Hewlett-Packard hp PhotoSmart 43x series', 5.27;
'Hewlett-Packard HP PhotoSmart 618 (V1.1)', 5.27;
'Hewlett-Packard HP PhotoSmart C945 (V01.61)', 7.176;
'Hewlett-Packard HP PhotoSmart R707 (V01.00)', 7.176;
'KONICA MILOLTA DYNAX 5D', 23.5;
'Konica Minolta Camera, Inc. DiMAGE A2', 8.80;
'KONICA MINOLTA CAMERA, Inc. DiMAGE G400', 5.76;
'Konica Minolta Camera, Inc. DiMAGE Z2', 5.76;
'KONICA MINOLTA DiMAGE A200', 8.80;
'KONICA MINOLTA DiMAGE X1', 7.176;
'KONICA MINOLTA DYNAX 5D', 23.5;
'Minolta Co., Ltd. DiMAGE F100', 7.176;
'Minolta Co., Ltd. DiMAGE Xi', 5.27;
'Minolta Co., Ltd. DiMAGE Xt', 5.27;
'Minolta Co., Ltd. DiMAGE Z1', 5.27;
'NIKON COOLPIX L3', 5.76;
'NIKON COOLPIX P2', 7.176;
'NIKON COOLPIX S4', 5.76;
'NIKON COOLPIX S7c', 5.76;
'NIKON CORPORATION NIKON D100', 23.7;
'NIKON CORPORATION NIKON D1', 23.7;
'NIKON CORPORATION NIKON D1H', 23.7;
'NIKON CORPORATION NIKON D200', 23.6;
'NIKON CORPORATION NIKON D2H', 23.3;
'NIKON CORPORATION NIKON D2X', 23.7;
'NIKON CORPORATION NIKON D40', 23.7;
'NIKON CORPORATION NIKON D50', 23.7;
'NIKON CORPORATION NIKON D60', 23.6;
'NIKON CORPORATION NIKON D70', 23.7;
'NIKON CORPORATION NIKON D70s', 23.7;
'NIKON CORPORATION NIKON D80', 23.6;
'NIKON E2500', 5.27;
'NIKON E2500', 5.27;
'NIKON E3100', 5.27;
'NIKON E3200', 5.27;
'NIKON E3700', 5.27;
'NIKON E4200', 7.176;
'NIKON E4300', 7.18;
'NIKON E4500', 7.176;
'NIKON E4600', 5.76;
'NIKON E5000', 8.80;
'NIKON E5200', 7.176;
'NIKON E5400', 7.176;
'NIKON E5600', 5.76;
'NIKON E5700', 8.80;
'NIKON E5900', 7.176;
'NIKON E7600', 7.176;
'NIKON E775', 5.27;
'NIKON E7900', 7.176;
'NIKON E7900', 7.176;
'NIKON E8800', 8.80;
'NIKON E990', 7.176;
'NIKON E995', 7.176;
'NIKON S1', 5.76;
'Nokia N80', 5.27;
'Nokia N80', 5.27;
'Nokia N93', 4.536;
'Nokia N95', 5.7;
'OLYMPUS CORPORATION C-5000Z', 7.176;
'OLYMPUS CORPORATION C5060WZ', 7.176;
'OLYMPUS CORPORATION C750UZ', 5.27;
'OLYMPUS CORPORATION C765UZ', 5.76;
'OLYMPUS CORPORATION C8080WZ', 8.80;
'OLYMPUS CORPORATION X250,D560Z,C350Z', 5.76;
'OLYMPUS CORPORATION X-3,C-60Z', 7.176;
'OLYMPUS CORPORATION X400,D580Z,C460Z', 5.27;
'OLYMPUS IMAGING CORP. E-500', 17.3;
'OLYMPUS IMAGING CORP. FE115,X715', 5.76;
'OLYMPUS IMAGING CORP. SP310', 7.176;
'OLYMPUS IMAGING CORP. SP510UZ', 5.75;
'OLYMPUS IMAGING CORP. SP550UZ', 5.76;
'OLYMPUS IMAGING CORP. uD600,S600', 5.75;
'OLYMPUS_IMAGING_CORP. X450,D535Z,C370Z', 5.27;
'OLYMPUS IMAGING CORP. X550,D545Z,C480Z', 5.76;
'OLYMPUS OPTICAL CO.,LTD C2040Z', 6.40;
'OLYMPUS OPTICAL CO.,LTD C211Z', 5.27;
'OLYMPUS OPTICAL CO.,LTD C2Z,D520Z,C220Z', 4.54;
'OLYMPUS OPTICAL CO.,LTD C3000Z', 7.176;
'OLYMPUS OPTICAL CO.,LTD C300Z,D550Z', 5.4;
'OLYMPUS OPTICAL CO.,LTD C4100Z,C4000Z', 7.176;
'OLYMPUS OPTICAL CO.,LTD C750UZ', 5.27;
'OLYMPUS OPTICAL CO.,LTD X-2,C-50Z', 7.176;
'OLYMPUS SP550UZ', 5.76;
'OLYMPUS X100,D540Z,C310Z', 5.27;
'Panasonic DMC-FX01', 5.76;
'Panasonic DMC-FX07', 5.75;
'Panasonic DMC-FX9', 5.76;
'Panasonic DMC-FZ20', 5.760;
'Panasonic DMC-FZ2', 4.54;
'Panasonic DMC-FZ30', 7.176;
'Panasonic DMC-FZ50', 7.176;
'Panasonic DMC-FZ5', 5.760;
'Panasonic DMC-FZ7', 5.76;
'Panasonic DMC-LC1', 8.80;
'Panasonic DMC-LC33', 5.760;
'Panasonic DMC-LX1', 8.50;
'Panasonic DMC-LZ2', 5.76;
'Panasonic DMC-TZ1', 5.75;
'Panasonic DMC-TZ3', 5.68;
'PENTAX Corporation PENTAX *ist DL', 23.5;
'PENTAX Corporation PENTAX *ist DS2', 23.5;
'PENTAX Corporation PENTAX *ist DS', 23.5;
'PENTAX Corporation PENTAX K100D', 23.5;
'PENTAX Corporation PENTAX Optio 450', 7.176;
'PENTAX Corporation PENTAX Optio 550', 7.176;
'PENTAX Corporation PENTAX Optio E10', 5.76;
'PENTAX Corporation PENTAX Optio S40', 5.76;
'PENTAX Corporation PENTAX Optio S4', 5.76;
'PENTAX Corporation PENTAX Optio S50', 5.76;
'PENTAX Corporation PENTAX Optio S5i', 5.76;
'PENTAX Corporation PENTAX Optio S5z', 5.76;
'PENTAX Corporation PENTAX Optio SV', 5.76;
'PENTAX Corporation PENTAX Optio WP', 5.75;
'RICOH CaplioG3 modelM', 5.27;
'RICOH Caplio GX', 7.176;
'RICOH Caplio R30', 5.75;
'Samsung Digimax 301', 5.27;
'Samsung Techwin <Digimax i5, Samsung #1>', 5.76;
'SAMSUNG TECHWIN Pro 815', 8.80;
'SONY DSC-F828', 8.80;
'SONY DSC-N12', 7.176;
'SONY DSC-P100', 7.176;
'SONY DSC-P10', 7.176;
'SONY DSC-P12', 7.176;
'SONY DSC-P150', 7.176;
'SONY DSC-P200', 7.176;
'SONY DSC-P52', 5.27;
'SONY DSC-P72', 5.27;
'SONY DSC-P73', 5.27;
'SONY DSC-P8', 5.27;
'SONY DSC-R1', 21.5;
'SONY DSC-S40', 5.27;
'SONY DSC-S600', 5.760;
'SONY DSC-T9', 7.18;
'SONY DSC-V1', 7.176;
'SONY DSC-W1', 7.176;
'SONY DSC-W30', 5.760;
'SONY DSC-W50', 5.75;
'SONY DSC-W5', 7.176;
'SONY DSC-W7', 7.176;
'SONY DSC-W80', 5.75};
exif = imfinfo(imageFileName);
img = imread(imageFileName);
% pixel resolution along the sensor's wider side
res_x = max(size(img,1),size(img,2));
if isfield(exif,'DigitalCamera') && isfield(exif.DigitalCamera,'FocalLengthIn35mmFilm')
    % this will take care of iphone
    ccd_width_mm = 36;
    focal_mm = exif.DigitalCamera.FocalLengthIn35mmFilm;
    focal_pixels = res_x * (focal_mm / ccd_width_mm);
elseif isfield(exif,'DigitalCamera') && isfield(exif.DigitalCamera,'FocalLength')
    str = sprintf('%s %s',exif.Make,exif.Model);
    % Trim leading, trailing spaces
    % BUG FIX: the original did "str = str;" here, so the trim never happened
    str = strtrim(str);
    % BUG FIX: take only the first match (the table contains duplicate
    % entries) and bail out gracefully when the camera is unknown; the
    % original errored on both cases.
    idx = find(ismember(ccd_widths(:,1),str), 1);
    if isempty(idx)
        fprintf('Warning: camera "%s" not found in the CCD width database\n', str);
        return;
    end
    ccd_width_mm = ccd_widths{idx,2};
    focal_mm = exif.DigitalCamera.FocalLength;
    focal_pixels = res_x * (focal_mm / ccd_width_mm);
end
function visualizeReprojection(graph, frames)
% VISUALIZEREPROJECTION  Overlay reprojected 3D points on every frame.
%
%   visualizeReprojection(graph, frames) draws one subplot per camera:
%     green '+'  - 3D structure reprojected through that camera
%     red 'x'    - the corresponding measured 2D observations
%     green line - reprojection error segment between the two
%     yellow 'o' - reprojections with no observation in this camera
%   graph fields used: ObsIdx, ObsVal, Str, Mot, f; frames fields used:
%   images, imsize.
nfigs = size(graph.ObsIdx,1);
w = ceil(sqrt(nfigs));
% BUG FIX: was h = floor(sqrt(nfigs)); for e.g. nfigs==3 or nfigs==7 that
% yields w*h < nfigs and subplot() errors on the excess index.
h = ceil(nfigs / w);
for c=1:nfigs
    subplot(w,h,c);
    im = imresize(imread(frames.images{c}),frames.imsize(1:2));
    imshow(im);
    hold on
    % project the structure into camera c (pinhole, then dehomogenize)
    X = f2K(graph.f) * transformPtsByRt(graph.Str,graph.Mot(:,:,c));
    xy = X(1:2,:) ./ X([3 3],:);
    selector = find(graph.ObsIdx(c,:)~=0);
    unselector = graph.ObsIdx(c,:)==0;
    tx_points = xy(:, selector);
    unmatching_points = xy(:, unselector);
    matching_points = graph.ObsVal(:,graph.ObsIdx(c,selector));
    % BUG FIX: passing size(...) (a vector) to %d printed the line twice
    fprintf('matching points: %d\n', size(matching_points,2));
    % mirror about the image center (NOTE(review): this flips the axes --
    % presumably the observations are stored center-relative; confirm)
    cx = size(im,2)/2;
    cy = size(im,1)/2;
    % build the centered coordinate arrays fresh each iteration; this
    % replaces the original grow-then-clear pattern
    centered_tx_points = [cx - tx_points(1,:); cy - tx_points(2,:)];
    centered_matching_points = [cx - matching_points(1,:); cy - matching_points(2,:)];
    centered_unmatching_points = [cx - unmatching_points(1,:); cy - unmatching_points(2,:)];
    plot(centered_tx_points(1, :),centered_tx_points(2, :), 'g+');
    plot(centered_matching_points(1, :), centered_matching_points(2, :), 'rx');
    % reprojection-error segments
    linesx = [centered_tx_points(1, :); centered_matching_points(1, :)];
    linesy = [centered_tx_points(2, :); centered_matching_points(2, :)];
    plot(linesx, linesy, 'g');
    plot(centered_unmatching_points(1, :), centered_unmatching_points(2, :), 'yo');
end
end
function visualizeGraph(graph,frames)
% VISUALIZEGRAPH  Plot the reconstructed point cloud with one camera
% frustum per frame of the reconstruction graph.
pts = graph.Str;
plot3(pts(1,:),pts(2,:),pts(3,:),'.r')
axis equal
imWidth  = frames.imsize(2);
imHeight = frames.imsize(1);
for camIdx = 1:length(graph.frames)
    drawCamera(graph.Mot(:,:,camIdx), imWidth, imHeight, graph.f, 0.001, 1);
end
axis tight
function pair = estimateE(pair,frames)
% ESTIMATEE  Robustly estimate the essential matrix for a view pair.
% Runs 5-point RANSAC on the matched keypoints, stores E in pair.E and
% keeps only the inlier matches in pair.matches.
distThreshold = .002; % Distance threshold for deciding outliers
x1 = pair.matches(1:2,:);
x2 = pair.matches(3:4,:);
[E, inliers] = ransac5point(x1, x2, distThreshold, frames.K, 1);
nMatches = size(pair.matches,2);
nInliers = length(inliers);
fprintf('%d inliers / %d SIFT matches = %.2f%%\n', nInliers, nMatches, 100*nInliers/nMatches);
pair.matches = pair.matches(:,inliers);
pair.E = E;
function pair = estimateF(pair)
% ESTIMATEF  Robustly estimate the fundamental matrix for a view pair.
% Runs RANSAC on the matched keypoints, stores F in pair.F and keeps only
% the inlier matches in pair.matches.
distThreshold = .002; % Distance threshold for deciding outliers
x1 = pair.matches(1:2,:);
x2 = pair.matches(3:4,:);
[F, inliers] = ransacfitfundmatrix(x1, x2, distThreshold, 0);
%t = .02; % Distance threshold for deciding outliers
%[F, inliers] = ransacfitfundmatrix7(SIFTloc_i, SIFTloc_j, t, 1);
nMatches = size(pair.matches,2);
nInliers = length(inliers);
fprintf('%d inliers / %d SIFT matches = %.2f%%\n', nInliers, nMatches, 100*nInliers/nMatches);
pair.matches = pair.matches(:,inliers);
pair.F = F;
function drawCamera(Rt, w, h, f, scale, lineWidth)
% DRAWCAMERA  Draw a wireframe camera frustum into the current 3D axes.
% SFMedu: Structrue From Motion for Education Purpose
% Written by Jianxiong Xiao (MIT License)
% Xcamera = Rt * Xworld
%
% Vertices (camera coordinates, scaled): 1 = optical center, 2-4 = axis
% markers (z, y, x at distance f), 5-8 = corners of the image plane.
V = scale * [ ...
    0 0 0 f -w/2  w/2 w/2 -w/2
    0 0 f 0 -h/2 -h/2 h/2  h/2
    0 f 0 0    f    f   f    f];
V = transformPtsByRt(V, Rt, true);
hold on;
% each row: vertex-index path and the line spec used to draw it;
% order matches the original sequence of plot3 calls
edges = { ...
    [1 4], '-r'; ...
    [1 3], '-g'; ...
    [1 2], '-b'; ...
    [1 5], '-k'; ...
    [1 6], '-k'; ...
    [1 7], '-k'; ...
    [1 8], '-k'; ...
    [5 6 7 8 5], '-k'};
for e = 1:size(edges,1)
    idx = edges{e,1};
    plot3(V(1,idx), V(2,idx), V(3,idx), edges{e,2}, 'LineWidth', lineWidth);
end
3D | stillbreeze/3D-reconstruction-Using-SfM-and-Stereo-Matching | code/SFMedu/full_pipeline.m | .m | 13,567 | 394 | function full_pipeline(data_seq_idx, adjust_focal_length, do_sequential_match, maxSize, visualize, do_dense)
%% run full pipeline with specified parameters
% FULL_PIPELINE run the complete SfM pipeline on one hard-coded image sequence.
%   data_seq_idx        - which image sequence to reconstruct (0-6)
%   adjust_focal_length - if true, also optimize intrinsics during bundle adjustment
%   do_sequential_match - match consecutive frames only; otherwise build a
%                         minimum-spanning-tree over all pairwise matches
%   maxSize             - maximum image dimension; larger images are downscaled
%   visualize           - show intermediate figures and save reprojection plot
%   do_dense            - run dense matching / dense reconstruction at the end
%% data
if data_seq_idx == 0
frames.images{1}='images/B21.jpg';
frames.images{2}='images/B22.jpg';
frames.images{3}='images/B23.jpg';
frames.images{4}='images/B24.jpg';
frames.images{5}='images/B25.jpg';
try
frames.focal_length = extractFocalFromEXIF(frames.images{1});
catch
end
if ~isfield(frames,'focal_length') || isempty(frames.focal_length)
fprintf('Warning: cannot find the focal length from the EXIF\n');
frames.focal_length = 719.5459; % for testing with the B??.jpg sequences
end
elseif data_seq_idx == 1
frames.images{1}='images/couch21.jpg';
frames.images{2}='images/couch22.jpg';
frames.images{3}='images/couch23.jpg';
frames.images{4}='images/couch24.jpg';
frames.images{5}='images/couch25.jpg';
frames.focal_length = 968.3333333333334 ;%1106.67;
% %
elseif data_seq_idx == 2
frames.images{1}='images/mug1.jpg';
frames.images{2}='images/mug2.jpg';
frames.images{3}='images/mug3.jpg';
frames.images{4}='images/mug4.jpg';
frames.images{5}='images/mug5.jpg';
frames.focal_length = 968.3333333333334 ;%1106.67;
elseif data_seq_idx == 3
frames.images{1}='images/self1_1.jpg';
frames.images{2}='images/self1_2.jpg';
frames.images{3}='images/self1_3.jpg';
frames.images{4}='images/self1_4.jpg';
frames.images{5}='images/self1_5.jpg';
frames.focal_length = 3300 ;
elseif data_seq_idx == 4
frames.images{1}='images/self2_1.jpg';
frames.images{2}='images/self2_2.jpg';
frames.images{3}='images/self2_3.jpg';
frames.images{4}='images/self2_4.jpg';
frames.images{5}='images/self2_5.jpg';
frames.focal_length = 3300 ;
elseif data_seq_idx == 5
frames.images{1}='images/self3_1.jpg';
frames.images{2}='images/self3_2.jpg';
frames.images{3}='images/self3_3.jpg';
frames.images{4}='images/self3_4.jpg';
frames.images{5}='images/self3_5.jpg';
frames.focal_length = 3300 ;
elseif data_seq_idx == 6
frames.images{1}='images/self4_1.jpg';
frames.images{2}='images/self4_2.jpg';
frames.images{3}='images/self4_3.jpg';
frames.images{4}='images/self4_4.jpg';
frames.images{5}='images/self4_5.jpg';
frames.focal_length = 3300 ;
else
error('unknown dataseq idx\n');
end
%% data
frames.length = length(frames.images);
frames.imsize = size(imread(frames.images{1}));
% downscale working resolution (and focal length with it) to maxSize
if max(frames.imsize)>maxSize
scale = maxSize/max(frames.imsize);
frames.focal_length = frames.focal_length * scale;
frames.imsize = size(imresize(imread(frames.images{1}),scale));
end
frames.K = f2K(frames.focal_length);
disp('intrinsics:');
disp(frames.K);
all_pairs = [];
adj_mat = zeros(frames.length);
%% do MST match... all pairs matching + weighted Nkeypoint minimum spanning tree
if ~do_sequential_match
fprintf('\n\n\tDoing Minimum Spanning Tree matching \n\n');
for frame=1:frames.length-1
for f2=frame+1:frames.length
s = RandStream('mcg16807','Seed',10);
RandStream.setGlobalStream(s);
% higher the score, the more matches returned (easier to
% match with given score)
[pair, score] = match2viewSIFTReturnThresh(frames, frame, f2);
iscore = 1 - score;
pair.nmatches = size(pair.matches, 2);
if ~isstruct(all_pairs)
all_pairs = repmat(pair, frames.length + 1, frames.length + 1);
end
all_pairs(frame,f2) = pair;
all_pairs(f2,frame) = pair;
adj_mat(frame,f2) = pair.nmatches * iscore;
adj_mat(f2,frame) = pair.nmatches * iscore;
end
end
% invert edge weights so the minimum spanning tree keeps the best matches
maxmat = max(max(adj_mat));
adj_mat_flip = maxmat + 1 - adj_mat;
adj_mat_flip(adj_mat_flip == maxmat + 1) = 0;
[mst, trav] = graphminspantree(sparse(adj_mat_flip));
path = graphpred2path(trav);
full_path = path{length(path)};
ordered_inds = zeros(length(full_path)-1, 2);
for ii=1:length(full_path) - 1
ordered_inds(ii, :) = [full_path(ii), full_path(ii+1)];
end
count = 0;
% BUG FIX: length() of an Nx2 matrix returns 2 when N==1, which would
% index a nonexistent second row; iterate over rows explicitly.
for oidx = 1:size(ordered_inds, 1)
oi = ordered_inds(oidx, :);
count = count + 1;
% need to set this random seed to produce exact same result
s = RandStream('mcg16807','Seed',10);
RandStream.setGlobalStream(s);
fprintf('adding edge (%d, %d)', oi(1), oi(2));
pair = all_pairs(oi(1), oi(2));
% if visualize, showMatches(pair,frames); title('raw feature matching'); end
if true % choose between different ways of getting E
% Estimate Fundamental matrix
pair = estimateF(pair);
% Convert Fundamental Matrix to Essential Matrix
pair.E = frames.K' * pair.F * frames.K; % MVG Page 257 Equation 9.12
else
% Estimate Essential Matrix directly using 5-point algorithm
pair = estimateE(pair,frames);
end
% if visualize, showMatches(pair,frames); title('inliers'); end
% Get Poses from Essential Matrix
pair.Rt = RtFromE(pair,frames);
% Convert the pair into the BA format
Graph{count} = pair2graph(pair,frames);
% re-triangulation
Graph{count} = triangulate(Graph{count},frames);
% if visualize, visualizeGraph(Graph{count},frames); title('triangulation'); end
% outlier rejection
% Graph{frame} = removeOutlierPts(Graph{frame});
% bundle adjustment
if adjust_focal_length
Graph{count} = bundleAdjustmentFull(Graph{count});
else
% BUG FIX: this branch indexed Graph{frame}, but in the MST branch the
% cell index is count ('frame' is a leftover loop variable from the
% all-pairs matching above) -- it adjusted/overwrote the wrong graph.
Graph{count} = bundleAdjustment(Graph{count}, adjust_focal_length);
end
% if visualize, visualizeGraph(Graph{count},frames); title('after two-view bundle adjustment'); end
end
end
%% SIFT matching and Fundamental Matrix Estimation
if do_sequential_match
fprintf('\n\n\tDoing Sequential matching \n\n');
for frame=1:frames.length-1
% need to set this random seed to produce exact same result
s = RandStream('mcg16807','Seed',10);
RandStream.setGlobalStream(s);
% keypoint matching
%pair = match2viewSURF(frames, frame, frame+1);
pair = match2viewSIFT(frames, frame, frame+1);
% if visualize, showMatches(pair,frames); title('raw feature matching'); end
if true % choose between different ways of getting E
% Estimate Fundamental matrix
pair = estimateF(pair);
% Convert Fundamental Matrix to Essential Matrix
pair.E = frames.K' * pair.F * frames.K; % MVG Page 257 Equation 9.12
else
% Estimate Essential Matrix directly using 5-point algorithm
pair = estimateE(pair,frames);
end
% if visualize, showMatches(pair,frames); title('inliers'); end
% Get Poses from Essential Matrix
pair.Rt = RtFromE(pair,frames);
% Convert the pair into the BA format
Graph{frame} = pair2graph(pair,frames);
% re-triangulation
Graph{frame} = triangulate(Graph{frame},frames);
% if visualize, visualizeGraph(Graph{frame},frames); title('triangulation'); end
% outlier rejection
% Graph{frame} = removeOutlierPts(Graph{frame});
% bundle adjustment
if adjust_focal_length
Graph{frame} = bundleAdjustmentFull(Graph{frame});
else
Graph{frame} = bundleAdjustment(Graph{frame}, adjust_focal_length);
end
% if visualize, visualizeGraph(Graph{frame},frames); title('after two-view bundle adjustment'); end
end
end
drawnow
fprintf('visualize: %d\n', visualize);
%% merge the graphs
%close all
fprintf('\n\nmerging graphs....\n');
sizes = zeros(1, length(Graph));
for i=1:length(Graph)
sizes(i) = size(Graph{i}.matches, 2);
end
% original used for naive KP-match sorted order, but fails when the pair
% isn't computed (no edge between two frames)
% [~, sort_inds] = sort(sizes, 'descend');
% sorted order, which may be sequential, or corresponds to KP MST
sort_inds = 1:length(Graph);
mergedGraph = Graph{sort_inds(1)};
for idx=2:frames.length-1
frame = sort_inds(idx);
% merge graph
mergedGraph = merge2graphs(mergedGraph,Graph{frame});
% re-triangulation
mergedGraph = triangulate(mergedGraph,frames);
% if visualize, visualizeGraph(mergedGraph,frames); title('triangulation'); end
% outlier rejection
% mergedGraph = removeOutlierPts(mergedGraph,10);
% bundle adjustment
if adjust_focal_length
mergedGraph = bundleAdjustmentFull(mergedGraph);
else
mergedGraph = bundleAdjustment(mergedGraph, adjust_focal_length);
end
% outlier rejection
mergedGraph = removeOutlierPts(mergedGraph, 10);
% bundle adjustment (again, after outliers were dropped)
if adjust_focal_length
mergedGraph = bundleAdjustmentFull(mergedGraph);
else
mergedGraph = bundleAdjustment(mergedGraph, adjust_focal_length);
end
% if visualize, visualizeGraph(mergedGraph,frames); title('after bundle adjustment'); end
end
printReprojectionError(mergedGraph); % [for homework]
if visualize
f = figure();
visualizeReprojection(mergedGraph,frames);
drawnow;
features_file_name = char(strcat(string(data_seq_idx),'_feat.png'));
saveas(f,features_file_name);
end% [for homework]
points2ply('sparse.ply',mergedGraph.Str);
if frames.focal_length ~= mergedGraph.f
disp('Focal length is adjusted by bundle adjustment');
frames.focal_length = mergedGraph.f;
% frames.K = f2K(frames.focal_length);
frames.K = graph2K(mergedGraph);
disp(frames.K);
end
%% dense matching
if do_dense
fprintf('dense matching ...\n');
for frame=1:frames.length-1
Graph{frame} = denseMatch(Graph{frame}, frames, frame, frame+1);
end
%% dense reconstruction
fprintf('triangulating dense points ...\n');
for frame=1:frames.length-1
clear X;
P{1} = frames.K * mergedGraph.Mot(:,:,frame);
P{2} = frames.K * mergedGraph.Mot(:,:,frame+1);
%par
for j=1:size(Graph{frame}.denseMatch,2)
X(:,j) = vgg_X_from_xP_nonlin(reshape(Graph{frame}.denseMatch(1:4,j),2,2),P,repmat([frames.imsize(2);frames.imsize(1)],1,2));
end
X = X(1:3,:) ./ X([4 4 4],:);
x1= P{1} * [X; ones(1,size(X,2))];
x2= P{2} * [X; ones(1,size(X,2))];
x1 = x1(1:2,:) ./ x1([3 3],:);
x2 = x2(1:2,:) ./ x2([3 3],:);
Graph{frame}.denseX = X;
Graph{frame}.denseRepError = sum(([x1; x2] - Graph{frame}.denseMatch(1:4,:)).^2,1);
% camera centers C = -R' * t, used to test viewing angle/visibility
Rt1 = mergedGraph.Mot(:, :, frame);
Rt2 = mergedGraph.Mot(:, :, frame+1);
C1 = - Rt1(1:3, 1:3)' * Rt1(:, 4);
C2 = - Rt2(1:3, 1:3)' * Rt2(:, 4);
view_dirs_1 = bsxfun(@minus, X, C1);
view_dirs_2 = bsxfun(@minus, X, C2);
view_dirs_1 = bsxfun(@times, view_dirs_1, 1 ./ sqrt(sum(view_dirs_1 .* view_dirs_1)));
view_dirs_2 = bsxfun(@times, view_dirs_2, 1 ./ sqrt(sum(view_dirs_2 .* view_dirs_2)));
Graph{frame}.cos_angles = sum(view_dirs_1 .* view_dirs_2);
c_dir1 = Rt1(3, 1:3)';
c_dir2 = Rt2(3, 1:3)';
Graph{frame}.visible = (sum(bsxfun(@times, view_dirs_1, c_dir1)) > 0) & (sum(bsxfun(@times, view_dirs_2, c_dir2)) > 0);
end
% visualize the dense point cloud
if visualize
figure
for frame=1:frames.length-1
hold on
goodPoint = Graph{frame}.denseRepError < 0.05;
plot3(Graph{frame}.denseX(1,goodPoint),Graph{frame}.denseX(2,goodPoint),Graph{frame}.denseX(3,goodPoint),'.b','Markersize',1);
end
hold on
plot3(mergedGraph.Str(1,:),mergedGraph.Str(2,:),mergedGraph.Str(3,:),'.r')
axis equal
title('dense cloud')
for i=1:frames.length
drawCamera(mergedGraph.Mot(:,:,i), frames.imsize(2), frames.imsize(1), frames.K(1,1), 0.001,i*2-1);
end
axis tight
end
% output as ply file to open in Meshlab (Open Software available at http://meshlab.sourceforge.net )
plyPoint = [];
plyColor = [];
for frame=1:frames.length-1
goodPoint = (Graph{frame}.denseRepError < 0.05) & (Graph{frame}.cos_angles < cos(5 / 180 * pi)) & Graph{frame}.visible;
X = Graph{frame}.denseX(:,goodPoint);
% get the color of the point
P{1} = frames.K * mergedGraph.Mot(:,:,frame);
x1= P{1} * [X; ones(1,size(X,2))];
x1 = round(x1(1:2,:) ./ x1([3 3],:));
x1(1,:) = frames.imsize(2)/2 - x1(1,:);
x1(2,:) = frames.imsize(1)/2 - x1(2,:);
indlin = sub2ind(frames.imsize(1:2),x1(2,:),x1(1,:));
im = imresize(imread(frames.images{frame}),frames.imsize(1:2));
imR = im(:,:,1);
imG = im(:,:,2);
imB = im(:,:,3);
colorR = imR(indlin);
colorG = imG(indlin);
colorB = imB(indlin);
plyPoint = [plyPoint X];
plyColor = [plyColor [colorR; colorG; colorB]];
end
dense_file_name = strcat(string(data_seq_idx),'_dense.ply');
points2ply(dense_file_name,plyPoint,plyColor);
fprintf('SFMedu is finished.\n Open the result dense.ply in Meshlab (Open Software available at http://meshlab.sourceforge.net ).\n Enjoy!\n');
else
fprintf('Not running dense reconstruction. Done\n');
end
end
| MATLAB |
3D | stillbreeze/3D-reconstruction-Using-SfM-and-Stereo-Matching | code/SFMedu/showMatches.m | .m | 420 | 14 | function showMatches(pair,frames)
% Display the keypoint matches of an image pair, one figure per frame.
% Match coordinates are stored centered on the image, so they are mapped
% back to pixel coordinates (w/2 - x, h/2 - y) before plotting.
imgA = imresize(imread(frames.images{pair.frames(1)}), frames.imsize(1:2));
imgB = imresize(imread(frames.images{pair.frames(2)}), frames.imsize(1:2));
i_showKeypoints(imgA, pair.matches(1:2,:));
i_showKeypoints(imgB, pair.matches(3:4,:));
end

function i_showKeypoints(img, pts)
% Open a new figure, show the image, and overlay keypoints as red crosses.
figure
imshow(img);
hold on
plot(size(img,2)/2 - pts(1,:), size(img,1)/2 - pts(2,:), 'r+');
end
| MATLAB |
3D | stillbreeze/3D-reconstruction-Using-SfM-and-Stereo-Matching | code/SFMedu/RtFromE.m | .m | 814 | 36 | function Rtbest=RtFromE(pair,frames)
% Recover the relative camera pose [R t] from the essential matrix.
% The essential matrix decomposition is ambiguous up to four candidate
% poses (MVG Page 257-259); the ambiguity is resolved by triangulating the
% matches and keeping the candidate with the most points in front of both
% cameras (cheirality check).
[R1, R2, t1, t2] = PoseEMat(pair.E);
% the four candidate poses
candidates = cat(3, [R1 t1], [R1 t2], [R2 t1], [R2 t2]);
% first camera fixed at the origin
P{1} = frames.K * [eye(3) [0;0;0]];
nMatches = size(pair.matches,2);
imsz = repmat([frames.imsize(2); frames.imsize(1)], 1, 2);
goodCnt = zeros(1,4);
for c=1:4
P{2} = frames.K * candidates(:,:,c);
X = zeros(4, nMatches);
for m=1:nMatches
X(:,m) = vgg_X_from_xP_nonlin(reshape(pair.matches(:,m),2,2),P,imsz);
end
X = X(1:3,:) ./ X([4 4 4],:);
% signed depth of each point w.r.t. the second candidate camera
depth2 = candidates(3,1:3,c) * ((X(:,:) - repmat(candidates(1:3,4,c),1,size(X,2))));
goodCnt(c) = sum(X(3,:)>0 & depth2 > 0);
end
% pick the solution with the most points passing the cheirality test
fprintf('%d\t%d\t%d\t%d\n',goodCnt);
[~, bestIndex]=max(goodCnt);
Rtbest = candidates(:,:,bestIndex);
| MATLAB |
3D | stillbreeze/3D-reconstruction-Using-SfM-and-Stereo-Matching | code/SFMedu/SFMedu2.m | .m | 1,509 | 39 | clc;
disp('SFMedu: Structrue From Motion for Education Purpose');
disp('Version 2 @ 2014');
disp('Written by Jianxiong Xiao (MIT License).. Modifications by nick rhinehart');
%% set up things
clear;
close all;
addpath(genpath('matchSIFT'));
addpath(genpath('denseMatch'));
addpath(genpath('RtToolbox'));
%% parameters for visualization, allowing for focal length adjustment
% and graph merging strategy. change data_seq_idx to one of {0, 1, 2}
visualize = true;
adjust_focal_length = true;
do_sequential_match = false;
do_dense = true;
data_seq_idx = [0 3 4];
maxSize = 640;
%% run the full pipeline once for every configured image sequence
for seq_idx = data_seq_idx
full_pipeline(seq_idx, adjust_focal_length, do_sequential_match, maxSize, visualize, do_dense);
end
% full_pipeline(data_seq_idx, adjust_focal_length, do_sequential_match, maxSize, visualize, do_dense); | MATLAB |
3D | stillbreeze/3D-reconstruction-Using-SfM-and-Stereo-Matching | code/SFMedu/points2ply.m | .m | 1,439 | 51 | function points2ply(PLYfilename, coordinate, rgb)
% Write a point cloud (optionally colored) to a binary little-endian PLY file.
% coordinate is 3 * n single matrix for n points
% rgb is 3 * n uint8 matrix for n points range [0, 255]
% accept n*3 input as well by transposing to 3*n
if size(coordinate,2)==3 && size(coordinate,1)~=3
coordinate = coordinate';
end
% drop points with any NaN coordinate
isValid = (~isnan(coordinate(1,:))) & (~isnan(coordinate(2,:))) & (~isnan(coordinate(3,:)));
coordinate = coordinate(:,isValid);
% reinterpret the float32 coordinates as raw bytes (12 bytes per point column)
data = reshape(typecast(reshape(single(coordinate),1,[]),'uint8'),3*4,[]);
if nargin>2
if size(rgb,2)==3 && size(rgb,1)~=3
rgb = rgb';
end
if ~isa(rgb,'uint8')
if max(rgb(:))<=1
rgb = rgb * 255; % normalized [0,1] colors -> [0,255]
end
% BUG FIX: previously only double inputs were cast to uint8, so a
% single (or integer) color matrix stayed un-cast and broke the
% [data; rgb] concatenation below with a type mismatch.
rgb = uint8(rgb);
end
rgb = rgb(:,isValid);
data = [data; rgb];
end
file = fopen(PLYfilename,'w');
fprintf (file, 'ply\n');
fprintf (file, 'format binary_little_endian 1.0\n');
fprintf (file, 'element vertex %d\n', size(data,2));
fprintf (file, 'property float x\n');
fprintf (file, 'property float y\n');
fprintf (file, 'property float z\n');
if nargin>2
fprintf (file, 'property uchar red\n');
fprintf (file, 'property uchar green\n');
fprintf (file, 'property uchar blue\n');
end
fprintf (file, 'end_header\n');
% each column of data is already the complete binary record for one vertex
fwrite(file, data,'uint8');
fclose(file);
end
| MATLAB |
3D | stillbreeze/3D-reconstruction-Using-SfM-and-Stereo-Matching | code/SFMedu/match2viewSURF.m | .m | 2,080 | 61 | function pair = match2viewSURF(frames, frameID_i, frameID_j)
% Match SURF keypoints between frames frameID_i and frameID_j.
% Returns pair.frames = [i, j] and pair.matches (4xM: [xA; yA; xB; yB])
% in image-centered coordinates. Requires the Computer Vision System Toolbox.
% Tested on Matlab r2014a with Computer Vision System Toolbox
% Similar to image stiching, but instead of homograph, we estimate the fundamental matrix
colorA = imresize(imread(frames.images{frameID_i}),frames.imsize(1:2));
colorB = imresize(imread(frames.images{frameID_j}),frames.imsize(1:2));
pair.frames = [frameID_i, frameID_j];
imageA = rgb2gray(colorA);
imageB = rgb2gray(colorB);
% detect and describe SURF keypoints in both grayscale images
ptsA = detectSURFFeatures(imageA,'MetricThreshold',100);
ptsB = detectSURFFeatures(imageB,'MetricThreshold',100);
[featuresA, validPtsA] = extractFeatures(imageA, ptsA);
[featuresB, validPtsB] = extractFeatures(imageB, ptsB);
index_pairs = matchFeatures(featuresA, featuresB,'Method','NearestNeighborSymmetric','MatchThreshold',2);
matchedPtsA = validPtsA(index_pairs(:,1));
matchedPtsB = validPtsB(index_pairs(:,2));
figure;
showMatchedFeatures(imageA,imageB,matchedPtsA,matchedPtsB,'montage');
title('Matched SURF points, including outliers');
% convert pixel locations to image-centered coordinates (w/2-x, h/2-y),
% the convention used throughout the rest of the pipeline
matchedPtsA = [size(colorA,2)/2-matchedPtsA.Location(:,1) size(colorA,1)/2-matchedPtsA.Location(:,2)];
matchedPtsB = [size(colorB,2)/2-matchedPtsB.Location(:,1) size(colorB,1)/2-matchedPtsB.Location(:,2)];
pair.matches = [matchedPtsA'; matchedPtsB'];
%{
for i=1:length(matchedPtsA)
matchedPtsA(i).Location = [size(colorA,2)/2-matchedPtsA(i).Location(1) size(colorA,1)/2-matchedPtsA(i).Location(2)];
end
matchedPtsB.Location(:,1) = size(colorB,2)/2-matchedPtsB.Location(:,1);
matchedPtsB.Location(:,2) = size(colorB,1)/2-matchedPtsB.Location(:,2);
%}
%[fRANSAC,inlierPts] = estimateFundamentalMatrix(matchedPtsA,matchedPtsB,'Method', 'RANSAC', 'NumTrials', 2000, 'DistanceThreshold', 0.2);
%pair.F = fRANSAC;
%inlierPtsA = matchedPtsA(inlierPts,:);
%inlierPtsB = matchedPtsB(inlierPts,:);
%pair.matches = [inlierPtsA'; inlierPtsB'];
% show the matched keypoints overlaid on each original image
figure
imshow(colorA);
hold on
plot(size(colorA,2)/2-pair.matches(1,:),size(colorA,1)/2-pair.matches(2,:),'r+');
figure
imshow(colorB);
hold on
plot(size(colorB,2)/2-pair.matches(3,:),size(colorB,1)/2-pair.matches(4,:),'r+');
| MATLAB |
3D | stillbreeze/3D-reconstruction-Using-SfM-and-Stereo-Matching | code/SFMedu/reprojectionResidual.m | .m | 1,016 | 46 | function residuals = reprojectionResidual(ObsIdx,ObsVal,px,py,f,Mot,Str)
% Reprojection residuals of all observations under the current cameras and points.
% (Constant) ObsIdx: index of KxN for N points observed by K cameras, sparse matrix
% (Constant) ObsVal: 2xM for M observations
% px,py: principle points in pixels
% f: focal length in pixels
% Mot: 3x2xK for K cameras (angle-axis rotation | translation)
% Str: 3xN for N points
% With 5 or 6 input arguments the trailing argument is a packed lsqnonlin
% parameter vector and is unpacked first.
nCam = size(ObsIdx,1);
if nargin==5
[Mot,Str,f] = unpackMotStrf(nCam,f);
elseif nargin==6
[Mot,Str] = unpackMotStrf(nCam,Mot);
end
Mot = reshape(Mot,3,2,[]);
Str = reshape(Str,3,[]);
residuals = [];
for cam=1:nCam
seen = ObsIdx(cam,:)~=0;
obsCols = ObsIdx(cam,seen);
% rotate the visible points into the camera frame and translate
ptsCam = AngleAxisRotatePts(Mot(:,1,cam), Str(:,seen));
ptsCam(1,:) = ptsCam(1,:) + Mot(1,2,cam);
ptsCam(2,:) = ptsCam(2,:) + Mot(2,2,cam);
ptsCam(3,:) = ptsCam(3,:) + Mot(3,2,cam);
% pinhole projection onto the image plane
proj = [f*(ptsCam(1,:)./ptsCam(3,:)) + px; f*(ptsCam(2,:)./ptsCam(3,:)) + py];
residuals = [residuals, proj - ObsVal(:,obsCols)];
end
residuals = residuals(:);
| MATLAB |
3D | stillbreeze/3D-reconstruction-Using-SfM-and-Stereo-Matching | code/SFMedu/removeOutlierPts.m | .m | 2,427 | 78 | function graph=removeOutlierPts(graph,threshold_in_pixels)
% Remove unreliable 3D points from the reconstruction graph:
% 1) points whose reprojection error in any camera exceeds
%    threshold_in_pixels (default 10 px), and
% 2) points seen by a camera pair whose viewing directions differ by less
%    than ~2 degrees (near-parallel rays give badly conditioned depth).
if exist('threshold_in_pixels','var')
threshold_in_pixels = threshold_in_pixels^2;
else
threshold_in_pixels = 10^2; % square it so that we don't need to square root everytime
end
threshold_in_degree = 2;
threshold_in_cos = cos(threshold_in_degree / 180 * pi);
% pass 1: reprojection-error test, one camera at a time (graph.Str and
% graph.ObsIdx shrink inside the loop; selector is recomputed per camera)
for c=1:size(graph.ObsIdx,1)
X = f2K(graph.f) * transformPtsByRt(graph.Str,graph.Mot(:,:,c));
xy = X(1:2,:) ./ X([3 3],:);
selector = find(graph.ObsIdx(c,:)~=0);
diff = xy(:,selector) - graph.ObsVal(:,graph.ObsIdx(c,selector));
outliers = sum(diff.^2,1) > threshold_in_pixels;
if sum(outliers)>0
fprintf('remove %d outliers outof %d points with reprojection error bigger than %f pixels\n',sum(outliers),length(outliers), sqrt(threshold_in_pixels));
end
pts2keep = true(1,size(graph.Str,2));
pts2keep(selector(outliers)) = false;
graph.Str = graph.Str(:,pts2keep);
graph.ObsIdx = graph.ObsIdx(:,pts2keep);
end
% return
% Check viewing angle
num_frames = numel(graph.frames);
% camera centers C = -R' * t for every frame
positions = zeros(3, num_frames);
for ii = 1:num_frames
Rt = graph.Mot(:, :, ii);
positions(:, ii) = - Rt(1:3, 1:3)' * Rt(:, 4);
end
% unit viewing direction from each camera to each point it observes
view_dirs = zeros(3, size(graph.Str, 2), num_frames);
for c = 1:size(graph.ObsIdx, 1)
selector = find(graph.ObsIdx(c,:)~=0);
camera_view_dirs = bsxfun(@minus, graph.Str(:, selector), positions(:, c));
dir_length = sqrt(sum(camera_view_dirs .* camera_view_dirs));
camera_view_dirs = bsxfun(@times, camera_view_dirs, 1 ./ dir_length);
view_dirs(:, selector, c) = camera_view_dirs;
end
% pass 2: drop points whose triangulation angle between any observing
% camera pair is too small (cosine above the threshold)
for c1 = 1:size(graph.ObsIdx, 1)
for c2 = 1:size(graph.ObsIdx, 1)
if c1 == c2
continue
end
selector = find(graph.ObsIdx(c1,:)~=0 & graph.ObsIdx(c2,:)~= 0);
view_dirs_1 = view_dirs(:, selector, c1);
view_dirs_2 = view_dirs(:, selector, c2);
cos_angles = sum(view_dirs_1 .* view_dirs_2);
outliers = cos_angles > threshold_in_cos;
if sum(outliers)>0
fprintf('remove %d outliers outof %d points with view angle less than %f degrees\n',sum(outliers),length(outliers), threshold_in_degree);
end
pts2keep = true(1,size(graph.Str,2));
pts2keep(selector(outliers)) = false;
graph.Str = graph.Str(:,pts2keep);
graph.ObsIdx = graph.ObsIdx(:,pts2keep);
end
end
| MATLAB |
3D | stillbreeze/3D-reconstruction-Using-SfM-and-Stereo-Matching | code/SFMedu/reprojectionResidualFull.m | .m | 907 | 40 | function residuals = reprojectionResidualFull(ObsIdx,ObsVal,px,py,fx,fy,Mot,Str)
% Reprojection residuals with separate fx/fy focal lengths (full intrinsics).
% (Constant) ObsIdx: index of KxN for N points observed by K cameras, sparse matrix
% (Constant) ObsVal: 2xM for M observations
% px,py: principle points in pixels
% fx,fy: focal lengths in pixels
% Mot: 3x2xK for K cameras (angle-axis rotation | translation)
% Str: 3xN for N points
nCam = size(ObsIdx,1);
Mot = reshape(Mot,3,2,[]);
Str = reshape(Str,3,[]);
residuals = [];
for cam=1:nCam
seen = ObsIdx(cam,:)~=0;
obsCols = ObsIdx(cam,seen);
% rotate the visible points into the camera frame and translate
ptsCam = AngleAxisRotatePts(Mot(:,1,cam), Str(:,seen));
ptsCam(1,:) = ptsCam(1,:) + Mot(1,2,cam);
ptsCam(2,:) = ptsCam(2,:) + Mot(2,2,cam);
ptsCam(3,:) = ptsCam(3,:) + Mot(3,2,cam);
% pinhole projection with anisotropic focal lengths
proj = [fx*(ptsCam(1,:)./ptsCam(3,:)) + px; fy*(ptsCam(2,:)./ptsCam(3,:)) + py];
residuals = [residuals, proj - ObsVal(:,obsCols)];
end
residuals = residuals(:);
| MATLAB |
3D | stillbreeze/3D-reconstruction-Using-SfM-and-Stereo-Matching | code/SFMedu/graph2K.m | .m | 106 | 7 | function K = graph2K(graph)
% Build the 3x3 camera intrinsics matrix from a bundle-adjusted graph.
% Expects graph.fx, graph.fy, graph.px, graph.py (set by bundleAdjustmentFull).
K = eye(3);
K(1,1) = graph.fx;
K(2,2) = graph.fy;
% BUG FIX: px/py were previously referenced as bare (undefined) variables,
% crashing at runtime; read the principal point from the graph like the
% focal lengths.
K(1, 3) = graph.px;
K(2, 3) = graph.py; | MATLAB |
3D | stillbreeze/3D-reconstruction-Using-SfM-and-Stereo-Matching | code/SFMedu/triangulate.m | .m | 653 | 30 | function graph=triangulate(graph,frames)
% Re-triangulate every 3D point in the graph from its 2D observations in
% all cameras that see it, writing the result back into graph.Str.
nPts = size(graph.Str,2);
X = zeros(4,nPts);
for i=1:nPts
% cameras in which point i was observed
validCamera = find(full(graph.ObsIdx(:,i)~=0))';
P=cell (1,length(validCamera));
x=zeros(2,length(validCamera));
cnt = 0;
for c=validCamera
cnt = cnt + 1;
% x (2-by-K matrix)
x(:,cnt) = graph.ObsVal(:,graph.ObsIdx(c,i));
% P (K-cell of 3-by-4 matrices)
P{cnt} = f2K(graph.f) * graph.Mot(:,:,c);
end
X(:,i) = vgg_X_from_xP_nonlin(x,P,repmat([frames.imsize(2);frames.imsize(1)],1,length(P)));
end
%X(isnan(X(:)))=1;
% dehomogenize: divide by the fourth (scale) coordinate
graph.Str = X(1:3,:) ./ X([4 4 4],:); | MATLAB |
3D | stillbreeze/3D-reconstruction-Using-SfM-and-Stereo-Matching | code/SFMedu/f2K.m | .m | 56 | 5 | function K = f2K(f)
% Pinhole intrinsics for focal length f with square pixels and the principal
% point at the origin (the pipeline works in image-centered coordinates).
K = diag([f, f, 1]); | MATLAB |
3D | stillbreeze/3D-reconstruction-Using-SfM-and-Stereo-Matching | code/SFMedu/vgg_X_from_xP_nonlin.m | .m | 1,745 | 82 | %vgg_X_from_xP_nonlin Estimation of 3D point from image matches and camera matrices, nonlinear.
% X = vgg_X_from_xP_lin(x,P,imsize) computes max. likelihood estimate of projective
% 3D point X (column 4-vector) from its projections in K images x (2-by-K matrix)
% and camera matrices P (K-cell of 3-by-4 matrices). Image sizes imsize (2-by-K matrix)
% are needed for preconditioning.
% By minimizing reprojection error, Newton iterations.
%
% X = vgg_X_from_xP_lin(x,P,imsize,X0) takes initial estimate of X. If X0 is omitted,
% it is computed by linear algorithm.
%
% See also vgg_X_from_xP_lin.
% werner@robots.ox.ac.uk, 2003
function X = vgg_X_from_xP_nonlin(u,P,imsize,X)
if iscell(P)
P = cat(3,P{:});
end
K = size(P,3);
if K < 2
error('Cannot reconstruct 3D from 1 image');
end
% no initial estimate given: seed with the linear (DLT) solution
if nargin==3
X = vgg_X_from_xP_lin(u,P,imsize);
end
if nargin==2
X = vgg_X_from_xP_lin(u,P);
end
% precondition
% rescale each image's coordinates to roughly [-1,1] for numerical stability
if nargin>2
for k = 1:K
H = [2/imsize(1,k) 0 -1
0 2/imsize(2,k) -1
0 0 1];
P(:,:,k) = H*P(:,:,k);
u(:,k) = H(1:2,1:2)*u(:,k) + H(1:2,3);
end
end
% Parametrize X such that X = T*[Y;1]; thus x = P*T*[Y;1] = Q*[Y;1]
[dummy,dummy,T] = svd(X',0);
T = T(:,[2:end 1]);
for k = 1:K
Q(:,:,k) = P(:,:,k)*T;
end
% Newton
% iterate Gauss-Newton updates on the 3-vector Y, stopping on convergence
Y = [0;0;0];
eprev = inf;
for n = 1:10
[e,J] = resid(Y,u,Q);
if 1-norm(e)/norm(eprev) < 1000*eps
break
end
eprev = e;
Y = Y - (J'*J)\(J'*e);
end
X = T*[Y;1];
return
%%%%%%%%%%%%%%%%%%%%%%%%%%
% Residuals and Jacobian of the reprojection error at parameter Y.
function [e,J] = resid(Y,u,Q)
K = size(Q,3);
e = [];
J = [];
for k = 1:K
q = Q(:,1:3,k);
x0 = Q(:,4,k);
x = q*Y + x0;
e = [e; x(1:2)/x(3)-u(:,k)];
J = [J; [x(3)*q(1,:)-x(1)*q(3,:)
x(3)*q(2,:)-x(2)*q(3,:)]/x(3)^2];
end
return
| MATLAB |
3D | stillbreeze/3D-reconstruction-Using-SfM-and-Stereo-Matching | code/SFMedu/pair2graph.m | .m | 355 | 18 | function graph=pair2graph(pair,frames)
% Convert a matched two-view pair into the bundle-adjustment graph format:
% Mot (camera poses), Str (3D points, initialized to zero), ObsVal (2D
% observations) and ObsIdx (camera-to-observation index table).
graph = pair;
n = size(pair.matches,2);
graph.f = frames.focal_length;
% first camera fixed at the origin; second carries the pair's relative pose
graph.Mot(:,:,1) = [eye(3) [0;0;0]];
graph.Mot(:,:,2) = pair.Rt;
graph.Str = zeros(3,n);
% observations: first camera's keypoints followed by the second camera's
graph.ObsVal = [pair.matches(1:2,:) pair.matches(3:4,:)];
graph.ObsIdx = sparse([1:n; n + (1:n)]);
| MATLAB |
3D | stillbreeze/3D-reconstruction-Using-SfM-and-Stereo-Matching | code/SFMedu/bundleAdjustmentFull.m | .m | 2,517 | 72 | function graph = bundleAdjustmentFull(graph)
% Bundle adjustment that also refines the intrinsics (fx, fy, px, py).
% Optimizes in stages: motion only, structure only, motion+structure, and
% finally motion+structure+intrinsics. Updates graph.Mot, graph.Str,
% graph.f/fx/fy/px/py in place.
% convert from Rt matrix to AngleAxis
nCam=length(graph.frames);
Mot = zeros(3,2,nCam);
for camera=1:nCam
Mot(:,1,camera) = RotationMatrix2AngleAxis(graph.Mot(:,1:3,camera));
Mot(:,2,camera) = graph.Mot(:,4,camera);
end
Str = graph.Str;
f = graph.f;
% default intrinsics when the graph has not been fully adjusted before
if ~isfield(graph, 'fx') || ~isfield(graph, 'fy') || ~isfield(graph, 'px') || ~isfield(graph, 'py')
fx = f;
fy = f;
px = 0;
py = 0;
else
fx = graph.fx;
fy = graph.fy;
px = graph.px;
py = graph.py;
end
residuals = reprojectionResidual(graph.ObsIdx,graph.ObsVal,px,py,f,Mot,Str);
fprintf('initial error = %f\n', 2*sqrt(sum(residuals.^2)/length(residuals)));
% bundle adjustment using lsqnonlin in Matlab (Levenberg-Marquardt)
options = optimoptions(@lsqnonlin,'Algorithm','levenberg-marquardt','Display','off');
% stage 1: adjust motion only (structure held fixed)
[vec,resnorm,residuals,exitflag] = lsqnonlin(@(optme) reprojectionResidual(graph.ObsIdx,graph.ObsVal,px,py,f,optme, Str), [Mot(:)],[],[],options);
Mot = reshape(vec(1:6*nCam), 3, 2, []);
fprintf('motion only error = %f\n', 2*sqrt(resnorm/length(residuals)));
% stage 2: adjust structure only (motion held fixed)
[vec,resnorm,residuals,exitflag] = lsqnonlin(@(optme) reprojectionResidual(graph.ObsIdx,graph.ObsVal,px,py,f,Mot,optme), [Str(:)],[],[],options);
Str = reshape(vec, 3, []);
fprintf('structure only error = %f\n', 2*sqrt(resnorm/length(residuals)));
% stage 3: adjust motion and structure jointly
[vec,resnorm,residuals,exitflag] = lsqnonlin(@(x) reprojectionResidual(graph.ObsIdx,graph.ObsVal,px,py,f,x), [Mot(:); Str(:)],[],[],options);
% BUG FIX: the joint result was previously discarded (the unpack below was
% commented out), so stage 4 restarted from the stage-2 estimate.
[Mot,Str] = unpackMotStrf(nCam,vec);
fprintf('S&M error = %f\n', 2*sqrt(resnorm/length(residuals)));
% stage 4: adjust intrinsics, motion and structure jointly.
% BUG FIX: the objective previously ignored the optimization variable x and
% always evaluated the fixed (fx,fy,px,py,Mot,Str), making this stage a
% no-op. The packed vector is laid out as [fx; fy; px; py; Mot(:); Str(:)]
% (see unpackMotStrFull), so unpack x accordingly inside the objective.
[vec,resnorm,residuals,exitflag] = lsqnonlin(@(x) reprojectionResidualFull(graph.ObsIdx,graph.ObsVal,x(3),x(4),x(1),x(2),x(5:6*nCam+4),x(6*nCam+5:end)), [fx; fy; px; py; Mot(:); Str(:)],[],[],options);
[Mot,Str,fxnew,fynew,pxnew,pynew] = unpackMotStrFull(nCam,vec);
fprintf('S&M error with adjust focal length = %f\n', resnorm/length(residuals));
graph.f = fxnew;
graph.fx = fxnew;
graph.fy = fynew;
graph.px = pxnew;
graph.py = pynew;
%residuals = reprojectionResidual(graph.ObsIdx,graph.ObsVal,px,py,f,Mot,Str);
%fprintf('final error = %f\n', 2*sqrt(sum(residuals.^2)/length(residuals)));
% convert back from angle-axis to rotation matrices
for camera=1:nCam
graph.Mot(:,:,camera) = [AngleAxis2RotationMatrix(Mot(:,1,camera)) Mot(:,2,camera)];
end
graph.Str = Str;
| MATLAB |
3D | stillbreeze/3D-reconstruction-Using-SfM-and-Stereo-Matching | code/SFMedu/vgg_X_from_xP_lin.m | .m | 1,095 | 46 | %vgg_X_from_xP_lin Estimation of 3D point from image matches and camera matrices, linear.
% X = vgg_X_from_xP_lin(x,P,imsize) computes projective 3D point X (column 4-vector)
% from its projections in K images x (2-by-K matrix) and camera matrices P (K-cell
% of 3-by-4 matrices). Image sizes imsize (2-by-K matrix) are needed for preconditioning.
% By minimizing algebraic distance.
%
% See also vgg_X_from_xP_nonlin.
% werner@robots.ox.ac.uk, 2003
function X = vgg_X_from_xP_lin(u,P,imsize)
if iscell(P)
P = cat(3,P{:});
end
K = size(P,3);
% precondition: rescale each image's coordinates to roughly [-1,1]
if nargin>2
for k = 1:K
H = [2/imsize(1,k) 0 -1
0 2/imsize(2,k) -1
0 0 1];
P(:,:,k) = H*P(:,:,k);
u(:,k) = H(1:2,1:2)*u(:,k) + H(1:2,3);
end
end
% stack the DLT constraints [u]_x * P * X = 0 from every view
A = [];
for k = 1:K
A = [A; vgg_contreps([u(:,k);1])*P(:,:,k)];
end
% A = normx(A')';
% the solution is the right singular vector of the smallest singular value
[dummy,dummy,X] = svd(A,0);
X = X(:,end);
% Get orientation right
% flip the sign so the point lies in front of the cameras where possible
s = reshape(P(3,:,:),[4 K])'*X;
if any(s<0)
X = -X;
if any(s>0)
% warning('Inconsistent orientation of point match');
end
end
return
| MATLAB |
3D | stillbreeze/3D-reconstruction-Using-SfM-and-Stereo-Matching | code/SFMedu/printReprojectionError.m | .m | 378 | 11 | function printReprojectionError(graph)
% Print the summed reprojection error of a reconstruction graph, accumulated
% over all cameras (Frobenius norm of each camera's residual block).
Error = 0;
for c=1:size(graph.ObsIdx,1)
% project all structure points into camera c
X = f2K(graph.f) * transformPtsByRt(graph.Str,graph.Mot(:,:,c));
xy = X(1:2,:) ./ X([3 3],:);
% compare against the observations actually made by this camera
selector = find(graph.ObsIdx(c,:)~=0);
diff = xy(:,selector) - graph.ObsVal(:,graph.ObsIdx(c,selector));
Error = Error + norm(diff);
end
% BUG FIX: previously printed the undefined variable E instead of the
% accumulator Error, which raised an error at runtime.
fprintf('total reprojection error: %.3f\n', Error); | MATLAB |
3D | stillbreeze/3D-reconstruction-Using-SfM-and-Stereo-Matching | code/SFMedu/outputPly.m | .m | 964 | 35 | function outputPly(PLYfilename,plyPoint,plyColor)
% Write a colored point cloud as a binary little-endian PLY file.
% plyPoint: 3xN float coordinates; plyColor: 3xN uint8 RGB values.
% SFMedu: Structrue From Motion for Education Purpose
% Written by Jianxiong Xiao (MIT License)
count = size(plyPoint,2);
fprintf('Writing ply point cloud file: ');
fid = fopen(PLYfilename,'w');
% PLY header: vertex element with float xyz and uchar rgb properties
headerLines = { ...
'ply', ...
'format binary_little_endian 1.0', ...
sprintf('element vertex %d', count), ...
'property float x', ...
'property float y', ...
'property float z', ...
'property uchar red', ...
'property uchar green', ...
'property uchar blue', ...
'end_header'};
for k = 1:numel(headerLines)
fprintf(fid, '%s\n', headerLines{k});
end
% binary body: interleave each point's coordinates (float) and color (uint8)
for i = 1:count
fwrite(fid, plyPoint(1:3,i), 'float');
fwrite(fid, plyColor(1:3,i), 'uint8');
end
fprintf('done \n');
fclose(fid);
| MATLAB |
3D | stillbreeze/3D-reconstruction-Using-SfM-and-Stereo-Matching | code/SFMedu/unpackMotStrf.m | .m | 301 | 12 | function [Mot,Str,f] = unpackMotStrf(nCam,vec)
% Split a packed lsqnonlin parameter vector into camera motions and points.
% vec is either [Mot(:); Str(:)] (length divisible by 3) or
% [f; Mot(:); Str(:)] (one leading focal-length entry makes length = 1 mod 3).
% Mot: 3x2xnCam, Str: 3xN; f is only assigned when present in vec.
if mod(length(vec),3) ~= 0
f = vec(1);
vec = vec(2:end);
end
motLen = 3*2*nCam;
Mot = reshape(vec(1:motLen),3,2,[]);
Str = reshape(vec(motLen+1:end),3,[]); | MATLAB |
3D | stillbreeze/3D-reconstruction-Using-SfM-and-Stereo-Matching | code/SFMedu/ransac5point.m | .m | 3,580 | 131 | function [E, inliers] = ransac5point(x1, x2, t, K, feedback)
% Robustly estimate the essential matrix E from point matches x1<->x2 using
% RANSAC with Nister's 5-point solver. Points are normalized by inv(K)
% first, so the Sampson threshold t is in normalized image coordinates.
% written by Fisher Yu @ 2014
if ~all(size(x1)==size(x2))
error('Data sets x1 and x2 must have the same dimension');
end
if nargin == 4
feedback = 0;
end
[rows,npts] = size(x1);
if rows~=2 && rows~=3
error('x1 and x2 must have 2 or 3 rows');
end
if rows == 2 % Pad data with homogeneous scale factor of 1
x1 = [x1; ones(1,npts)];
x2 = [x2; ones(1,npts)];
end
% normalize the matches into camera coordinates
K_inv = inv(K);
x1 = K_inv * x1;
x2 = K_inv * x2;
s = 5; % Number of points needed to fit a fundamental matrix. Note that
% only 7 are needed but the function 'fundmatrix' only
% implements the 8-point solution.
fittingfn = @fit5point;
distfn = @funddist;
degenfn = @isdegenerate;
% x1 and x2 are 'stacked' to create a 6xN array for ransac
[E, inliers] = ransac([x1; x2], fittingfn, distfn, degenfn, s, t, feedback);
end
% Fit function for RANSAC: run the 5-point solver on a minimal sample and
% return all candidate essential matrices as a cell array.
function Es = fit5point(x)
E = peig5pt(x(1:3, :), x(4:6, :));
num_solutions = size(E, 1) / 3;
Es = cell(num_solutions, 1);
for ii = 1:num_solutions
Ei = E((ii*3-2):(ii*3), :);
Ei = Ei ./ Ei(3,3);
Es{ii} = Ei;
end
end
%--------------------------------------------------------------------------
% Function to evaluate the first order approximation of the geometric error
% (Sampson distance) of the fit of a fundamental matrix with respect to a
% set of matched points as needed by RANSAC. See: Hartley and Zisserman,
% 'Multiple View Geometry in Computer Vision', page 270.
%
% Note that this code allows for F being a cell array of fundamental matrices of
% which we have to pick the best one. (A 7 point solution can return up to 3
% solutions)
function [bestInliers, bestF] = funddist(F, x, t)
x1 = x(1:3,:); % Extract x1 and x2 from x
x2 = x(4:6,:);
if iscell(F) % We have several solutions each of which must be tested
nF = length(F); % Number of solutions to test
bestF = F{1}; % Initial allocation of best solution
ninliers = 0; % Number of inliers
for k = 1:nF
x2tFx1 = zeros(1,length(x1));
for n = 1:length(x1)
x2tFx1(n) = x2(:,n)'*F{k}*x1(:,n);
end
Fx1 = F{k}*x1;
Ftx2 = F{k}'*x2;
% Evaluate distances
d = x2tFx1.^2 ./ ...
(Fx1(1,:).^2 + Fx1(2,:).^2 + Ftx2(1,:).^2 + Ftx2(2,:).^2);
inliers = find(abs(d) < t); % Indices of inlying points
if length(inliers) > ninliers % Record best solution
ninliers = length(inliers);
bestF = F{k};
bestInliers = inliers;
end
end
else % We just have one solution
x2tFx1 = zeros(1,length(x1));
for n = 1:length(x1)
x2tFx1(n) = x2(:,n)'*F*x1(:,n);
end
Fx1 = F*x1;
Ftx2 = F'*x2;
% Evaluate distances
d = x2tFx1.^2 ./ ...
(Fx1(1,:).^2 + Fx1(2,:).^2 + Ftx2(1,:).^2 + Ftx2(2,:).^2);
bestInliers = find(abs(d) < t); % Indices of inlying points
bestF = F; % Copy F directly to bestF
end
end
%----------------------------------------------------------------------
% (Degenerate!) function to determine if a set of matched points will result
% in a degeneracy in the calculation of a fundamental matrix as needed by
% RANSAC. This function assumes this cannot happen...
function r = isdegenerate(x)
r = 0;
end
| MATLAB |
3D | stillbreeze/3D-reconstruction-Using-SfM-and-Stereo-Matching | code/SFMedu/unpackMotStrFull.m | .m | 211 | 11 | function [Mot,Str,fx,fy,px,py] = unpackMotStrFull(nCam,vec)
cut = 3*2*nCam;
fx = vec(1);
fy = vec(2);
px = vec(3);
py = vec(4);
Mot = reshape(vec(5:(cut + 4)),3,2,[]);
Str = reshape(vec(cut+5:end),3,[]);
end | MATLAB |
3D | stillbreeze/3D-reconstruction-Using-SfM-and-Stereo-Matching | code/SFMedu/xy.m | .m | 0 | 0 | null | MATLAB |
3D | stillbreeze/3D-reconstruction-Using-SfM-and-Stereo-Matching | code/SFMedu/vgg_contreps.m | .m | 1,355 | 36 | function Y = vgg_contreps(X)
% vgg_contreps Contraction with epsilon tensor.
%
% B = vgg_contreps(A) is tensor obtained by contraction of A with epsilon tensor.
% However, it works only if the argument and result fit to matrices, in particular:
%
% - if A is row or column 3-vector ... B = [A]_x
% - if A is skew-symmetric 3-by-3 matrix ... B is row 3-vector such that A = [B]_x
% - if A is skew-symmetric 4-by-4 matrix ... then A can be interpreted as a 3D line Pluecker matrix
% skew-symmetric 4-by-4 B as its dual Pluecker matrix.
% - if A is row 2-vector ... B = [0 1; -1 0]*A', i.e., A*B=eye(2)
% - if A is column 2-vector ... B = A'*[0 1; -1 0], i.e., B*A=eye(2)
%
% It is vgg_contreps(vgg_contreps(A)) = A.
% werner@robots.ox.ac.uk, Oct 2001
if prod(size(X)) == 3 % get [X]_\times
Y = [0 X(3) -X(2)
-X(3) 0 X(1)
X(2) -X(1) 0];
elseif all(size(X) == [1 2])
Y = [0 1; -1 0]*X';
elseif all(size(X) == [2 1])
Y = X'*[0 1; -1 0];
elseif all(size(X) == [3 3]) % get X from [X]_\times
Y = [X(2,3) X(3,1) X(1,2)];
elseif all(size(X) == [4 4]) % pluecker matrix dual
Y = [0 X(3,4) X(4,2) X(2,3)
X(4,3) 0 X(1,4) X(3,1)
X(2,4) X(4,1) 0 X(1,2)
X(3,2) X(1,3) X(2,1) 0 ];
else
error('Wrong matrix size.')
end | MATLAB |
3D | stillbreeze/3D-reconstruction-Using-SfM-and-Stereo-Matching | code/SFMedu/bundleAdjustment.m | .m | 2,584 | 70 | function graph = bundleAdjustment(graph, adjustFocalLength)
% convert from Rt matrix to AngleAxis
nCam=length(graph.frames);
Mot = zeros(3,2,nCam);
for camera=1:nCam
Mot(:,1,camera) = RotationMatrix2AngleAxis(graph.Mot(:,1:3,camera));
Mot(:,2,camera) = graph.Mot(:,4,camera);
end
Str = graph.Str;
f = graph.f;
fx = f;
fy = f;
% assume px, py=0
px = 0;
py = 0;
residuals = reprojectionResidual(graph.ObsIdx,graph.ObsVal,px,py,f,Mot,Str);
fprintf('initial error = %f\n', 2*sqrt(sum(residuals.^2)/length(residuals)));
% bundle adjustment using lsqnonlin in Matlab (Levenberg-Marquardt)
options = optimoptions(@lsqnonlin,'Algorithm','levenberg-marquardt','Display','off');
% adjust structure [for homework]
% !!! fill in your code here
[vec,resnorm,residuals,exitflag] = lsqnonlin(@(optme) reprojectionResidual(graph.ObsIdx,graph.ObsVal,px,py,f,optme, Str), [Mot(:)],[],[],options);
% reshape optimized result
Mot = reshape(vec(1:6*nCam), 3, 2, []);
fprintf('motion only error = %f\n', 2*sqrt(resnorm/length(residuals)));
% adjust motion [for homework]
[vec,resnorm,residuals,exitflag] = lsqnonlin(@(optme) reprojectionResidual(graph.ObsIdx,graph.ObsVal,px,py,f,Mot,optme), [Str(:)],[],[],options);
% reshape optimized result\
Str = reshape(vec, 3, []);
% !!! fill in your code here
fprintf('structure only error = %f\n', 2*sqrt(resnorm/length(residuals)));
% adjust motion and structure
[vec,resnorm,residuals,exitflag] = lsqnonlin(@(x) reprojectionResidual(graph.ObsIdx,graph.ObsVal,px,py,f,x), [Mot(:); Str(:)],[],[],options);
% [Mot,Str] = unpackMotStrf(nCam,vec);
fprintf('S&M error = %f\n', 2*sqrt(resnorm/length(residuals)));
if exist('adjustFocalLength','var') && adjustFocalLength
% adjust focal length, motion and structure
% [vec,resnorm,residuals,exitflag] = lsqnonlin(@(x) reprojectionResidual(graph.ObsIdx,graph.ObsVal,px,py,x), [f; Mot(:); Str(:)],[],[],options);
% [Mot,Str,f] = unpackMotStrf(nCam,vec);
[vec,resnorm,residuals,exitflag] = lsqnonlin(@(x) reprojectionResidualFull(graph.ObsIdx,graph.ObsVal,px,py,fx,fy,Mot,Str), [fx; fy; px; py; Mot(:); Str(:)],[],[],options);
[Mot,Str,fx,fy,px,py] = unpackMotStrFull(nCam,vec);
fprintf('S&M error with adjust focal length = %f\n', resnorm/length(residuals));
graph.f = fx;
end
%residuals = reprojectionResidual(graph.ObsIdx,graph.ObsVal,px,py,f,Mot,Str);
%fprintf('final error = %f\n', 2*sqrt(sum(residuals.^2)/length(residuals)));
for camera=1:nCam
graph.Mot(:,:,camera) = [AngleAxis2RotationMatrix(Mot(:,1,camera)) Mot(:,2,camera)];
end
graph.Str = Str;
| MATLAB |
3D | stillbreeze/3D-reconstruction-Using-SfM-and-Stereo-Matching | code/SFMedu/RtToolbox/inverseRt.m | .m | 91 | 4 | function RtOut = inverseRt(RtIn)
RtOut = [RtIn(1:3,1:3)', - RtIn(1:3,1:3)'* RtIn(1:3,4)];
| MATLAB |
3D | stillbreeze/3D-reconstruction-Using-SfM-and-Stereo-Matching | code/SFMedu/RtToolbox/concatenateRts.m | .m | 165 | 6 | function Rt = concatenateRts(RtOuter, RtInner)
% Rt * X = RtOuter * RtInner * X
Rt = [RtOuter(:,1:3)* RtInner(:,1:3) RtOuter(:,1:3)*RtInner(:,4) + RtOuter(:,4)];
| MATLAB |
3D | stillbreeze/3D-reconstruction-Using-SfM-and-Stereo-Matching | code/SFMedu/RtToolbox/AngleAxis2RotationMatrix.m | .m | 1,351 | 38 | function R = AngleAxis2RotationMatrix(angle_axis)
theta2 = dot(angle_axis,angle_axis);
if (theta2 > 0.0)
% We want to be careful to only evaluate the square root if the
% norm of the angle_axis vector is greater than zero. Otherwise
% we get a division by zero.
theta = sqrt(theta2);
wx = angle_axis(1) / theta;
wy = angle_axis(2) / theta;
wz = angle_axis(3) / theta;
costheta = cos(theta);
sintheta = sin(theta);
R(1+0, 1+0) = costheta + wx*wx*(1 - costheta);
R(1+1, 1+0) = wz*sintheta + wx*wy*(1 - costheta);
R(1+2, 1+0) = -wy*sintheta + wx*wz*(1 - costheta);
R(1+0, 1+1) = wx*wy*(1 - costheta) - wz*sintheta;
R(1+1, 1+1) = costheta + wy*wy*(1 - costheta);
R(1+2, 1+1) = wx*sintheta + wy*wz*(1 - costheta);
R(1+0, 1+2) = wy*sintheta + wx*wz*(1 - costheta);
R(1+1, 1+2) = -wx*sintheta + wy*wz*(1 - costheta);
R(1+2, 1+2) = costheta + wz*wz*(1 - costheta);
else
% At zero, we switch to using the first order Taylor expansion.
R(1+0, 1+0) = 1;
R(1+1, 1+0) = -angle_axis(3);
R(1+2, 1+0) = angle_axis(2);
R(1+0, 1+1) = angle_axis(3);
R(1+1, 1+1) = 1;
R(1+2, 1+1) = -angle_axis(1);
R(1+0, 1+2) = -angle_axis(2);
R(1+1, 1+2) = angle_axis(1);
R(1+2, 1+2) = 1;
end
| MATLAB |
3D | stillbreeze/3D-reconstruction-Using-SfM-and-Stereo-Matching | code/SFMedu/RtToolbox/AngleAxisRotatePts.m | .m | 1,457 | 53 | function result = AngleAxisRotatePts(angle_axis, pt)
angle_axis = reshape(angle_axis(1:3),1,3);
theta2 = dot(angle_axis,angle_axis);
if (theta2 > 0.0)
% Away from zero, use the rodriguez formula
%
% result = pt costheta + (w x pt) * sintheta + w (w . pt) (1 - costheta)
%
% We want to be careful to only evaluate the square root if the
% norm of the angle_axis vector is greater than zero. Otherwise
% we get a division by zero.
theta = sqrt(theta2);
w = angle_axis / theta;
costheta = cos(theta);
sintheta = sin(theta);
% w_cross_pt = cross(w, pt);
w_cross_pt = xprodmat(w) * pt;
%w_dot_pt = dot(w, pt);
w_dot_pt = w * pt;
result = pt * costheta + w_cross_pt * sintheta + (w' * (1 - costheta)) * w_dot_pt;
else
% Near zero, the first order Taylor approximation of the rotation
% matrix R corresponding to a vector w and angle w is
%
% R = I + hat(w) * sin(theta)
%
% But sintheta ~ theta and theta * w = angle_axis, which gives us
%
% R = I + hat(w)
%
% and actually performing multiplication with the point pt, gives us
% R * pt = pt + w x pt.
%
% Switching to the Taylor expansion at zero helps avoid all sorts
% of numerical nastiness.
%w_cross_pt = cross(angle_axis, pt);
w_cross_pt = xprodmat(angle_axis) * pt; % vectorize version
result = pt + w_cross_pt;
end
| MATLAB |
3D | stillbreeze/3D-reconstruction-Using-SfM-and-Stereo-Matching | code/SFMedu/RtToolbox/transformPtsByRt.m | .m | 209 | 8 | function Y3D = transformPtsByRt(X3D, Rt, isInverse)
if nargin<3 || ~isInverse
Y3D = Rt(:,1:3) * X3D + repmat(Rt(:,4),1,size(X3D,2));
else
Y3D = Rt(:,1:3)' * (X3D - repmat(Rt(:,4),1,size(X3D,2)));
end
| MATLAB |
3D | stillbreeze/3D-reconstruction-Using-SfM-and-Stereo-Matching | code/SFMedu/RtToolbox/xprodmat.m | .m | 335 | 26 | function A=xprodmat(a)
%Matrix representation of a cross product
%
% A=xprodmat(a)
%
%in:
%
% a: 3D vector
%
%out:
%
% A: a matrix such that A*b=cross(a,b)
if length(a)<3, error 'Input must be a vector of length 3'; end
ax=a(1);
ay=a(2);
az=a(3);
A=zeros(3);
A(2,1)=az; A(1,2)=-az;
A(3,1)=-ay; A(1,3)=ay;
A(3,2)=ax; A(2,3)=-ax;
| MATLAB |
3D | stillbreeze/3D-reconstruction-Using-SfM-and-Stereo-Matching | code/SFMedu/RtToolbox/RotationMatrix2AngleAxis.m | .m | 2,817 | 71 | function angle_axis = RotationMatrix2AngleAxis(R)
% The conversion of a rotation matrix to the angle-axis form is
% numerically problematic when then rotation angle is close to zero
% or to Pi. The following implementation detects when these two cases
% occurs and deals with them by taking code paths that are guaranteed
% to not perform division by a small number.
% x = k * 2 * sin(theta), where k is the axis of rotation.
angle_axis(1) = R(1+2, 1+1) - R(1+1, 1+2);
angle_axis(2) = R(1+0, 1+2) - R(1+2, 1+0);
angle_axis(3) = R(1+1, 1+0) - R(1+0, 1+1);
% Since the right hand side may give numbers just above 1.0 or
% below -1.0 leading to atan misbehaving, we threshold.
costheta = min(max((R(1+0, 1+0) + R(1+1, 1+1) + R(1+2, 1+2) - 1.0) / 2.0, -1.0), 1.0);
% sqrt is guaranteed to give non-negative results, so we only
% threshold above.
sintheta = min(sqrt(angle_axis(1) * angle_axis(1) + angle_axis(2) * angle_axis(2) + angle_axis(3) * angle_axis(3)) / 2.0, 1.0);
% Use the arctan2 to get the right sign on theta
theta = atan2(sintheta, costheta);
% Case 1: sin(theta) is large enough, so dividing by it is not a
% problem. We do not use abs here, because while jets.h imports
% std::abs into the namespace, here in this file, abs resolves to
% the int version of the function, which returns zero always.
%
% We use a threshold much larger then the machine epsilon, because
% if sin(theta) is small, not only do we risk overflow but even if
% that does not occur, just dividing by a small number will result
% in numerical garbage. So we play it safe.
kThreshold = 1e-12;
if ((sintheta > kThreshold) || (sintheta < -kThreshold))
r = theta / (2.0 * sintheta);
angle_axis = angle_axis * r;
return;
end
% Case 2: theta ~ 0, means sin(theta) ~ theta to a good
% approximation.
if (costheta > 0.0)
angle_axis = angle_axis * 0.5;
return;
end
% Case 3: theta ~ pi, this is the hard case. Since theta is large,
% and sin(theta) is small. Dividing by theta by sin(theta) will
% either give an overflow or worse still numerically meaningless
% results. Thus we use an alternate more complicated formula
% here.
% Since cos(theta) is negative, division by (1-cos(theta)) cannot
% overflow.
inv_one_minus_costheta = 1.0 / (1.0 - costheta);
% We now compute the absolute value of coordinates of the axis
% vector using the diagonal entries of R. To resolve the sign of
% these entries, we compare the sign of angle_axis[i]*sin(theta)
% with the sign of sin(theta). If they are the same, then
% angle_axis[i] should be positive, otherwise negative.
for i=1:3
angle_axis(i) = theta * sqrt((R(i, i) - costheta) * inv_one_minus_costheta);
if (((sintheta < 0.0) && (angle_axis(i) > 0.0)) || ((sintheta > 0.0) && (angle_axis(i) < 0.0)))
angle_axis(i) = -angle_axis(i);
end
end
| MATLAB |
3D | stillbreeze/3D-reconstruction-Using-SfM-and-Stereo-Matching | code/SFMedu/RtToolbox/transformDirByRt.m | .m | 69 | 5 | function DirT = transformDirByRt(Dir,Rt)
DirT = Rt(1:3,1:3) * Dir;
| MATLAB |
3D | stillbreeze/3D-reconstruction-Using-SfM-and-Stereo-Matching | code/SFMedu/denseMatch/denseMatch.m | .m | 1,899 | 66 | function pair = denseMatch(pair, frames, frameID_i, frameID_j)
% SFMedu: Structrue From Motion for Education Purpose
% Written by Jianxiong Xiao (MIT License)
im1=imresize(imread(frames.images{frameID_i}),frames.imsize(1:2));
im2=imresize(imread(frames.images{frameID_j}),frames.imsize(1:2));
HalfSizePropagate = 2;
%%
im1 = double(im1)/256;
if size(im1,3)==3
im1 = ( 76 * im1(:,:,1) + 150 * im1(:,:,2) + 30 * im1(:,:,3) ) / 256;
end
matchable_image1 = ReliableArea(im1);
zncc1 = ZNCCpatch_all(im1,HalfSizePropagate);
%%
im2 = double(im2)/256;
if size(im2,3)==3
im2 = ( 76 * im2(:,:,1) + 150 * im2(:,:,2) + 30 * im2(:,:,3) ) / 256;
end
matchable_image2 = ReliableArea(im2);
zncc2 = ZNCCpatch_all(im2,HalfSizePropagate);
%%
initial_match = round([size(im1,1)/2-pair.matches(2,:)', size(im1,2)/2-pair.matches(1,:)', size(im1,1)/2-pair.matches(4,:)', size(im1,2)/2-pair.matches(3,:)', zeros(size(pair.matches,2),1)]);
HalfSizePropagate = 2;
match_pair= propagate(initial_match, [], [], matchable_image1, matchable_image2, zncc1, zncc2, HalfSizePropagate );
%imshow(im1)
%plot(pair.denseMatch(:,2)' ,pair.denseMatch(:,1)' ,'.')
pair.denseMatch = match_pair;
pair.denseMatch(:,1) = size(im1,1)/2-pair.denseMatch(:,1);
pair.denseMatch(:,2) = size(im1,2)/2-pair.denseMatch(:,2);
pair.denseMatch(:,3) = size(im2,1)/2-pair.denseMatch(:,3);
pair.denseMatch(:,4) = size(im2,2)/2-pair.denseMatch(:,4);
pair.denseMatch = pair.denseMatch';
pair.denseMatch = pair.denseMatch([2 1 4 3 5],:);
%imshow(im1)
%hold on
%plot(pair.denseMatch(1,:) ,pair.denseMatch(2,:),'.')
%{
figure
colorScale = 3;
im1 = im2double(im1)/colorScale;
lin1= sub2ind(size(im1),match_pair(:,1),match_pair(:,2));
im1(lin1) = im1(lin1)*colorScale;
im2 = im2double(im2)/colorScale;
lin2= sub2ind(size(im2),match_pair(:,3),match_pair(:,4));
im2(lin2) = im2(lin2)*colorScale;
imMatch = [im1 im2];
imshow(imMatch);
%} | MATLAB |
3D | stillbreeze/3D-reconstruction-Using-SfM-and-Stereo-Matching | code/SFMedu/denseMatch/ReliableArea.m | .m | 254 | 8 | function rim = ReliableArea(im)
rim = max( max( abs(im-im([2:end 1],:)) , abs(im-im([end 1:end-1],:)) ), max( abs(im-im(:,[2:end 1])) , abs(im-im(:,[end 1:end-1])) ));
rim([1 end], :) = 0;
rim(:, [1 end]) = 0;
rim = ( rim < 0.01 );
rim = double(1-rim);
| MATLAB |
3D | stillbreeze/3D-reconstruction-Using-SfM-and-Stereo-Matching | code/SFMedu/denseMatch/ZNCCpatch_all.m | .m | 684 | 16 | function zncc = ZNCCpatch_all(im, HalfSizeWindow)
zncc = zeros(size(im,1),size(im,2), (2*HalfSizeWindow + 1)^2);
k = 1;
for x=-HalfSizeWindow:HalfSizeWindow
for y=-HalfSizeWindow:HalfSizeWindow
zncc( (HalfSizeWindow + 1):(end-HalfSizeWindow),(HalfSizeWindow + 1):(end-HalfSizeWindow),k) = ...
im((HalfSizeWindow+1+x):(end-HalfSizeWindow+x),(HalfSizeWindow+1+y):(end-HalfSizeWindow+y));
k = k+1;
end
end
zncc_mean = mean(zncc,3);
zncc_deviation = sqrt( sum(zncc .^ 2,3) - ( (2*HalfSizeWindow + 1) * zncc_mean) .^2 );
zncc = ( zncc - repmat(zncc_mean,[1 1 (2*HalfSizeWindow + 1)^2]) ) ./ repmat(zncc_deviation,[1 1 (2*HalfSizeWindow + 1)^2]);
| MATLAB |
3D | stillbreeze/3D-reconstruction-Using-SfM-and-Stereo-Matching | code/SFMedu/denseMatch/propagate.m | .m | 4,847 | 115 | function [match_pair match_im_i match_im_j] = propagate(initial_match, im_i, im_j, matchable_im_i, matchable_im_j, zncc_i, zncc_j , WinHalfSize)
%{
Please cite this paper if you use this code.
J. Xiao, J. Chen, D.-Y. Yeung, and L. Quan
Learning Two-view Stereo Matching
Proceedings of the 10th European Conference on Computer Vision (ECCV2008)
Springer Lecture Notes in Computer Science (LNCS), Pages 15-27
%}
%addpath('priority_queue_1.0');
CostMax = 0.5;
visualization = false;
if visualization
figure
h0 = subplot(2,1,1); imshow(im_i); hold(h0,'on');
h1 = subplot(2,1,2); imshow(im_j); hold(h1,'on');
end
match_im_i(:,:,1) = matchable_im_i - 2; match_im_i(:,:,2) = match_im_i(:,:,1);
match_im_j(:,:,1) = matchable_im_j - 2; match_im_j(:,:,2) = match_im_j(:,:,1);
maxMatchingNumber = min(numel(matchable_im_i),numel(matchable_im_j));
MaxIndexValidMatch = max(numel(matchable_im_i),numel(matchable_im_j));
NbMaxStartMatch = MaxIndexValidMatch+5*5*9;
match_heap = pq_create(NbMaxStartMatch*25+MaxIndexValidMatch);
match_pair = initial_match;
for match_pair_size = 1:size(match_pair,1)
%descriptor_i = ZNCCpatch(im_i, match_pair(match_pair_size,1:2), WinHalfSize);
%descriptor_j = ZNCCpatch(im_j, match_pair(match_pair_size,3:4), WinHalfSize);
%match_pair(match_pair_size,5) = sum(sum(descriptor_i{1} .* descriptor_j{1}));
match_pair(match_pair_size,5) = sum(zncc_i(match_pair(match_pair_size,1),match_pair(match_pair_size,2),:) ...
.* zncc_j(match_pair(match_pair_size,3),match_pair(match_pair_size,4),:));
pq_push(match_heap, match_pair_size, match_pair(match_pair_size,5));
end
while ( maxMatchingNumber >=0 && pq_size(match_heap) > 0 )
[bestIndex,bestPriority] = pq_pop(match_heap);
x0 = match_pair(bestIndex,1); y0 = match_pair(bestIndex,2);
x1 = match_pair(bestIndex,3); y1 = match_pair(bestIndex,4);
%fprintf('%d %d %d %d\n',y0-1,x0-1,y1-1,x1-1);
if visualization
plot(h0,[y0],[x0],'y+'); plot(h1,[y1],[x1],'y+');
end
xMin0= max(WinHalfSize+1, x0-WinHalfSize);
xMax0= min(size(matchable_im_i,1)-WinHalfSize, x0+WinHalfSize+1);
yMin0= max(WinHalfSize+1, y0-WinHalfSize);
yMax0= min(size(matchable_im_i,2)-WinHalfSize, y0+WinHalfSize+1);
xMin1= max(WinHalfSize+1, x1-WinHalfSize);
xMax1= min(size(matchable_im_j,1)-WinHalfSize, x1+WinHalfSize+1);
yMin1= max(WinHalfSize+1, y1-WinHalfSize);
yMax1= min(size(matchable_im_j,2)-WinHalfSize, y1+WinHalfSize+1);
local_heap = [];
for yy0= yMin0:yMax0
for xx0= xMin0:xMax0
if ( match_im_i(xx0,yy0,1) == -1 )
xx= (xx0+x1)-x0;
yy= (yy0+y1)-y0;
for yy1= max(yMin1, yy-1) : min(yMax1, yy+2)
for xx1 = max(xMin1, xx-1) : min(xMax1, xx+2)
if ( match_im_j(xx1,yy1,1) == -1 )
%descriptor_i = ZNCCpatch(im_i, [xx0,yy0], WinHalfSize);
%descriptor_j = ZNCCpatch(im_j, [xx1,yy1], WinHalfSize);
%AuxCost= sum(sum(descriptor_i{1} .* descriptor_j{1}));
AuxCost= sum(zncc_i(xx0,yy0,:) .* zncc_j(xx1,yy1,:));
if (1- AuxCost <= CostMax) % IsEpipolarValidMatch(xx0,yy0,xx1,yy1) &&
% fprintf('add to local heap %d %d %d %d\n',yy0-1,xx0-1,yy1-1,xx1-1);
local_heap(end+1,:) = [xx0,yy0,xx1,yy1,AuxCost];
end
end
end
end
end
end
end
if size(local_heap,1) > 0
[sorted_value sorted_idx] = sort(local_heap(:,5),'descend');
local_heap = local_heap(sorted_idx,:);
for bestIndex=1:size(local_heap,1)
xx0 = local_heap(bestIndex,1); yy0 = local_heap(bestIndex,2);
xx1 = local_heap(bestIndex,3); yy1 = local_heap(bestIndex,4);
if ( match_im_i(xx0,yy0,1) < 0 && match_im_j(xx1,yy1,1) < 0 )
match_im_i(xx0,yy0,:) = [xx1,yy1];
match_im_j(xx1,yy1,:) = [xx0,yy0];
match_pair_size = match_pair_size + 1;
match_pair(match_pair_size,:) = local_heap(bestIndex,:);
pq_push(match_heap, match_pair_size, local_heap(bestIndex,5));
maxMatchingNumber = maxMatchingNumber - 1;
% fprintf('add to global heap %d %d %d %d\n',yy0-1,xx0-1,yy1-1,xx1-1);
if visualization
plot(h0,[yy0],[xx0],'r.');
plot(h1,[yy1],[xx1],'r.');
end
end
end
end
%fprintf('Heap size = %d\n',pq_size(match_heap));
end
match_pair = match_pair( size(initial_match,1)+1:end, :);
pq_delete(match_heap);
| MATLAB |
3D | stillbreeze/3D-reconstruction-Using-SfM-and-Stereo-Matching | code/SFMedu/denseMatch/priority_queue_1.0_old/pq_pop.m | .m | 725 | 26 | % PQ_POP removes the topmost element of the priority queue
%
% SYNTAX
% [idx, cost] = pq_pop(pq)
%
% INPUT PARAMETERS
% pq: a pointer to the priority queue
%
% OUTPUT PARAMETERS
% idx: the index of the popped element
% cost: the cost of the popped element
%
% DESCRIPTION
% Removes the topmost element from a priority queue and return its content.
%
% See also:
% PQ_DEMO, PQ_CREATE, PQ_PUSH, PQ_POP, PQ_SIZE, PQ_TOP, PQ_DELETE
%
% References:
% Gormen, T.H. and Leiserson, C.E. and Rivest, R.L., "introduction to
% algorithms", 1990, MIT Press/McGraw-Hill, Chapter 6.
% Copyright (c) 2008 Andrea Tagliasacchi
% All Rights Reserved
% email: andrea.tagliasacchi@gmail.com
% $Revision: 1.0$ Created on: May 22, 2009 | MATLAB |
3D | stillbreeze/3D-reconstruction-Using-SfM-and-Stereo-Matching | code/SFMedu/denseMatch/priority_queue_1.0_old/pq_push.m | .m | 902 | 29 | % PQ_PUSH inserts a new element/update a cost in the priority queue
%
% SYNTAX
% pq_push(pq, idx, cost)
%
% INPUT PARAMETERS
% pq: a pointer to the priority queue
%
% OUTPUT PARAMETERS
% idx: the index of the element
% cost: the cost of the newly inserted element or the
% cost to which the element should be updated to
%
% DESCRIPTION
% Inserts a new element in the priority queue. If the elements already
% exist (elements identified by their "idx"), its cost is updated and a new
% element will not be inserted.
%
% See also:
% PQ_DEMO, PQ_CREATE, PQ_PUSH, PQ_POP, PQ_SIZE, PQ_TOP, PQ_DELETE
%
% References:
% Gormen, T.H. and Leiserson, C.E. and Rivest, R.L., "introduction to
% algorithms", 1990, MIT Press/McGraw-Hill, Chapter 6.
% Copyright (c) 2008 Andrea Tagliasacchi
% All Rights Reserved
% email: andrea.tagliasacchi@gmail.com
% $Revision: 1.0$ Created on: May 22, 2009 | MATLAB |
3D | stillbreeze/3D-reconstruction-Using-SfM-and-Stereo-Matching | code/SFMedu/denseMatch/priority_queue_1.0_old/pq_pop.cpp | .cpp | 2,033 | 62 | //==============================================================================
// Name : pq_pop.cpp
// Author : Andrea Tagliasacchi
// Version : 1.0
// Copyright : 2009 (c) Andrea Tagliasacchi
// Description : pops an element from the PQ and returns its index and cost
//
// May 22, 2009: Created
//==============================================================================
#include "MyHeap.h"
//------------------------------- MATLAB -------------------------------------//
#ifdef MATLAB_MEX_FILE
#include "mex.h"
#define toSysout(...) printf(__VA_ARGS__)
#define exit_with_error(...) \
do { \
fprintf(stdout, "Error: "); \
fprintf(stdout, __VA_ARGS__ ); \
fprintf(stdout, "\n" ); \
exit(1); \
} while(0)
void retrieve_heap( const mxArray* matptr, MaxHeap<double>* & heap){
// retrieve pointer from the MX form
double* pointer0 = mxGetPr(matptr);
// check that I actually received something
if( pointer0 == NULL )
mexErrMsgTxt("vararg{1} must be a valid priority queue pointer\n");
// convert it to "long" datatype (good for addresses)
long pointer1 = (long) pointer0[0];
// convert it to "KDTree"
heap = (MaxHeap<double>*) pointer1;
// check that I actually received something
if( heap == NULL )
mexErrMsgTxt("vararg{1} must be a valid priority queue pointer\n");
}
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]){
if( nrhs!=1 )
mexErrMsgTxt("This function requires 3 arguments\n");
if( !mxIsNumeric(prhs[0]) )
mexErrMsgTxt("parameter 1 missing!\n");
// retrieve the heap
MaxHeap<double>* heap;
retrieve_heap( prhs[0], heap);
// extract head before popping
pair<double, int> curr = heap->top();
plhs[0] = mxCreateDoubleMatrix(1, 1, mxREAL);
*mxGetPr(plhs[0]) = curr.second+1;
plhs[1] = mxCreateDoubleMatrix(1, 1, mxREAL);
*mxGetPr(plhs[1]) = curr.first;
// pop top element in the PQ
heap->pop();
}
#endif
| C++ |
3D | stillbreeze/3D-reconstruction-Using-SfM-and-Stereo-Matching | code/SFMedu/denseMatch/priority_queue_1.0_old/pq_size.cpp | .cpp | 1,845 | 53 | //==============================================================================
// Name : pq_size.cpp
// Author : Andrea Tagliasacchi
// Version : 1.0
// Copyright : 2009 (c) Andrea Tagliasacchi
// Description : Returns the size of the priority queue
//
// May 22, 2009: Created
//==============================================================================
#include "MyHeap.h"
//------------------------------- MATLAB -------------------------------------//
#define toSysout(...) printf(__VA_ARGS__)
#define exit_with_error(...) \
do { \
fprintf(stdout, "Error: "); \
fprintf(stdout, __VA_ARGS__ ); \
fprintf(stdout, "\n" ); \
exit(1); \
} while(0)
#ifdef MATLAB_MEX_FILE
#include "mex.h"
void retrieve_heap( const mxArray* matptr, MaxHeap<double>* & heap){
// retrieve pointer from the MX form
double* pointer0 = mxGetPr(matptr);
// check that I actually received something
if( pointer0 == NULL )
mexErrMsgTxt("vararg{1} must be a valid priority queue pointer\n");
// convert it to "long" datatype (good for addresses)
long pointer1 = (long) pointer0[0];
// convert it to "KDTree"
heap = (MaxHeap<double>*) pointer1;
// check that I actually received something
if( heap == NULL )
mexErrMsgTxt("vararg{1} must be a valid priority queue pointer\n");
}
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]){
if( nrhs!=1 )
mexErrMsgTxt("This function requires 1 arguments\n");
if( !mxIsNumeric(prhs[0]) )
mexErrMsgTxt("parameter 1 missing!\n");
// retrieve the heap
MaxHeap<double>* heap;
retrieve_heap( prhs[0], heap);
// pop top element in the PQ
plhs[0] = mxCreateDoubleMatrix(1, 1, mxREAL);
*mxGetPr(plhs[0]) = heap->size();
}
#endif
| C++ |
3D | stillbreeze/3D-reconstruction-Using-SfM-and-Stereo-Matching | code/SFMedu/denseMatch/priority_queue_1.0_old/pq_demo.cpp | .cpp | 7,872 | 330 | #include "MyHeap.h"
#include <stdlib.h>
#include <cmath>
#include <iostream>
#include <algorithm>
using namespace std;
/// TEST FOR MAXHEAP
int test1(){
MaxHeap<double> pq(7); // heap of ints compared by doubles
pq.push( 1,1 );
pq.push( 6,0 );
pq.push( 2,4 );
pq.push( 3,3 );
pq.push( 4,2 );
pq.push( 5,5 );
// test the updates
pq.push( 6,5 );
pq.push( 3,7 );
// check ordering
while( !pq.empty() ){
cout << "[ " << pq.top().second << " " << pq.top().first << " ] " << endl;
pq.pop();
}
return 0;
}
/// Indexing test, check whether tree indexes are behaving properly. A bug here caused a long headache...
int test2(){
cout << "0:" << endl;
cout << PARENT(0) << endl;
cout << LEFT(0) << endl;
cout << RIGHT(0) << endl << endl;
cout << "1:" << endl;
cout << PARENT(1) << endl;
cout << LEFT(1) << endl;
cout << RIGHT(1) << endl << endl;
cout << "2:" << endl;
cout << PARENT(2) << endl;
cout << LEFT(2) << endl;
cout << RIGHT(2) << endl << endl;
cout << "3:" << endl;
cout << PARENT(3) << endl;
cout << LEFT(3) << endl;
cout << RIGHT(3) << endl << endl;
cout << "7:" << endl;
cout << PARENT(7) << endl;
cout << LEFT(7) << endl;
cout << RIGHT(7) << endl << endl;
return 0;
}
/// EXTENSIVE (RANDOM) TEST OF PUSH/POP
int test3(){
int N = 100000;
MaxHeap<double> pq(N); // heap of ints compared by doubles
for (int n=0; n < N; n++) {
// create random push and updates
double key = float(rand()) / RAND_MAX;
pq.push( key, n );
//Verify at each step O(n)
//if( pq.verifyHeap() == false ){
// cout << "error on pusching of " << n << endl;
// exit(0);
//}
}
// verify that exhaustive pop is done in decreasing
double prev_max = 1; // since we provide keys [0,1)
while( !pq.empty() ){
pair<double, int> curr = pq.top();
if( curr.first > prev_max ){
cout << "BUG: " << curr.first << " > " << prev_max << endl;
exit(0);
} else {
prev_max = curr.first;
}
pq.pop();
}
cout << "terminated correctly" << endl;
exit(0);
}
/// MAXHEAP EXTENSIVE (RANDOM) TEST OF RANDOM ACCESS UPDATE
int test4(){
// create base queue
int N = 100000;
MaxHeap<double> pq(N); // heap of ints compared by doubles
vector<double> costs(N); // cache costs
for (int n=0; n < N; n++) {
// create random push and updates
double key = float(rand()) / RAND_MAX;
costs[ n ] = key;
pq.push( key, n );
}
// create random updated (increase in value) to random elements
for (int n=0; n < N; n++) {
// create random push and updates
int index = rand() % N;
double keyupdate = ( float(rand()) / RAND_MAX ) / 50; // small update
costs[index] += keyupdate;
pq.push( costs[index], index );
}
// verify that exhaustive pop is done in decreasing
double prev_max = 2; // since we provide keys [0,1)
while( !pq.empty() ){
pair<double, int> curr = pq.top();
if( curr.first > prev_max ){
cout << "BUG: " << curr.first << " > " << prev_max << endl;
exit(0);
} else {
prev_max = curr.first;
}
pq.pop();
}
cout << "terminated correctly" << endl;
exit(0);
}
/// MINHEAP EXTENSIVE (RANDOM) TEST OF RANDOM ACCESS UPDATE
int test5(){
// create base queue
int N = 100000;
MinHeap<double> pq(N); // heap of ints compared by doubles
vector<double> costs(N); // cache costs
for (int n=0; n < N; n++) {
// create random push and updates
double key = float(rand()) / RAND_MAX;
costs[ n ] = key;
pq.push( key, n );
}
// create random updated (increase in value) to random elements
for (int n=0; n < N; n++) {
// create random push and updates
int index = rand() % N;
double keyupdate = ( float(rand()) / RAND_MAX ) / 50; // small update
costs[index] -= keyupdate;
pq.push( costs[index], index );
}
// verify that exhaustive pop is done in decreasing
double prev_min = -2; // since we provide keys [0,1)
while( !pq.empty() ){
pair<double, int> curr = pq.top();
if( curr.first < prev_min ){
cout << "BUG: " << curr.first << " < " << prev_min << endl;
exit(0);
} else {
prev_min = curr.first;
}
pq.pop();
}
cout << "terminated correctly" << endl;
exit(0);
}
/// FULL HEAP SORT + MATLAB-STYLE BACKINDEXES
int test6(){
int N = 10;
MinHeap<double> heap(N);
vector<double> data(N);
for (int n=0; n < N; n++) {
double key = round( (float(rand()) / RAND_MAX)*10 );
data.push_back(key);
heap.push( key, n );
}
heap.print();
// sort back indexes
vector<int> indexes;
indexes.reserve(10);
heap.heapsort( indexes );
// output sorted element indexes
for( int n=0; n<N; n++)
cout << indexes[n] << " ";
return 0;
}
/// MINHEAP: HEAP SORT & BACKINDEXES (KD-TREE related test)
int test7(){
// MULTIMEDIAN (KD-TREE)
int N = 10;
MinHeap<double> heap_x(N);
MinHeap<double> heap_y(N);
MinHeap<double> heap_z(N);
vector< vector<double> > data(N, vector<double>(3,0));
vector<double> idxs_x(N);
vector<double> idxs_y(N);
vector<double> idxs_z(N);
for (int n=0; n < N; n++) {
double key_x = round( (float(rand()) / RAND_MAX)*10 );
double key_y = round( (float(rand()) / RAND_MAX)*10 );
double key_z = round( (float(rand()) / RAND_MAX)*10 );
data[n][0] = key_x;
data[n][1] = key_y;
data[n][2] = key_z;
// fill the heaps
heap_x.push( key_x, n );
heap_y.push( key_y, n );
heap_z.push( key_z, n );
}
// print out original points
cout << "data" << endl;
for( int n=0; n<N; n++)
cout << n << " ";
cout << endl << "---------------------------" << endl;
for( int n=0; n<N; n++)
cout << data[n][0] << " ";
cout << endl;
for( int n=0; n<N; n++)
cout << data[n][1] << " ";
cout << endl;
for( int n=0; n<N; n++)
cout << data[n][2] << " ";
cout << endl << endl;
vector< vector<int> > indexes( 3, vector<int>(N,0) ); //back indexes
heap_x.heapsort( indexes[0] );
heap_y.heapsort( indexes[1] );
heap_z.heapsort( indexes[2] );
//indexes from data offset to position in sortex k-th dimension
cout << "back indexes" << endl;
for( int n=0; n<N; n++)
cout << indexes[0][n] << " ";
cout << endl;
for( int n=0; n<N; n++)
cout << indexes[1][n] << " ";
cout << endl;
for( int n=0; n<N; n++)
cout << indexes[2][n] << " ";
return 0;
}
/// MINHEAP: No backindexed heap structure, just test push/pop. Used in kNN kd-trees
int test8(){
	// Stress-test push/pop on a heap WITHOUT back indexes: after N random
	// pushes, an exhaustive pop must yield keys in non-decreasing order.
	int N = 100000;
	MinHeap<double> pq; // heap of ints key-ed by double
	for (int i=0; i < N; i++){
		// random key in [0,1)
		double key = double(rand()) / RAND_MAX;
		pq.push( key, i );
	}
	// verify that the pop sequence never decreases
	double prev_min = -2; // below any possible key, since rand gives [0,1)
	while( !pq.empty() ){
		pair<double, int> curr = pq.top();
		if( curr.first < prev_min ){
			cout << "BUG: " << curr.first << " < " << prev_min << endl;
			exit(1);
		}
		prev_min = curr.first;
		pq.pop();
	}
	return 0;
}
//class CMP{
// public double k;
// public int i;
//};
/// Descending-order comparator for STL heap algorithms:
/// returns true when `a` must come before `b` (i.e. a is the larger value).
bool comp(const int &a, const int &b){
	return b < a;
}
int test9(){
	// Build a heap over an int array with the custom descending comparator,
	// then heap-sort it and print the (descending) result.
	int a[8] = {1,2,3,4,5,6,7,8};
	vector<int> v1(a, a+8);
	// heapify, then sort with respect to the same ordering
	std::make_heap( v1.begin(), v1.end(), comp );
	std::sort_heap( v1.begin(), v1.end(), comp );
	for( vector<int>::iterator it = v1.begin(); it != v1.end(); ++it )
		cout << *it << " ";
	cout << endl;
	return 0;
}
/// Split a vector into a left half (which gets the extra element when the
/// size is odd) and a right half, then print all three vectors.
int test10(){
	int a[7] = {1,2,3,4,5,6,7};
	vector<int> v(a, a+7);
	// derive the split sizes from v.size() instead of hard-coding 4 and 3,
	// so the code stays correct if the fixture changes
	vector<int>::size_type half = v.size()/2;
	vector<int> larray(v.size() - half);
	vector<int> rarray(half);
	std::copy(v.begin(), v.end()-half, larray.begin());
	std::copy(v.end()-half, v.end(), rarray.begin());
	// unsigned loop counters: avoids signed/unsigned comparison warnings
	for (vector<int>::size_type i=0; i < v.size(); ++i)
		cout << v[i] << " ";
	cout << endl;
	for (vector<int>::size_type i=0; i < larray.size(); ++i)
		cout << larray[i] << " ";
	cout << endl;
	for (vector<int>::size_type i=0; i < rarray.size(); ++i)
		cout << rarray[i] << " ";
	cout << endl;
	return 0;
}
////////////// MAIN //////////////
/// Entry point: individual tests are enabled/disabled by (un)commenting.
/// Only test10 (vector split via std::copy) is currently active.
int main(int argc, char **argv) {
//	test1();
//	test2();
//	test3();
//	test4();
//	test5();
//	test6();
//	test7();
//	test8();
//	test9();
	test10();
}
| C++ |
3D | stillbreeze/3D-reconstruction-Using-SfM-and-Stereo-Matching | code/SFMedu/denseMatch/priority_queue_1.0_old/MyHeap_old.h | .h | 13,341 | 474 | /**
* @file MyHeaps.h
* @author Andrea Tagliasacchi
* @data 26 March 2008
* @copyright (c) Andrea Tagliasacchi - All rights reserved
*/
#ifndef MYHEAPS_H_
#define MYHEAPS_H_
#include <vector>
#include <exception> // general exception
#include <stdexcept> // out_of_range
#include <iostream>
#include <cassert>
#include <algorithm>
#include "float.h"
using namespace std;
// macros for navigation in the hard coded binary tree
#define PARENT(pos) ((pos-1)>>1) // equivalent to floor(pos/2)
#define LEFT(pos) ((pos<<1)+1) // equivalent to pos*2 + 1
#define RIGHT(pos) ((pos<<1)+2) // equivalent to pos*2 + 2
/// EXCEPTION
/// Thrown by top()/pop() when the heap contains no elements.
class HeapEmptyException : public out_of_range{
	public:
		HeapEmptyException(const string &message) : out_of_range(message) {;}
};
/// Thrown by a key update that moves in the wrong direction
/// (decrease in a MaxHeap, increase in a MinHeap).
class InvalidKeyIncreaseException : public out_of_range{
	public:
		InvalidKeyIncreaseException(const string &message) : out_of_range(message) {;}
};
/// Thrown by push() when the element index exceeds the maximum
/// declared in the back-indexed constructor.
class InvalidIndexException : public out_of_range{
	public:
		InvalidIndexException(const string &message) : out_of_range(message) {;}
};
/**
 * This class provides a back-indexed heap structure where indexes of
* elements already in the heap are kept updated to allow for random access
* update of elements (done automatically in push if element with
* "idx" is already contained in the heap )
*
* Refer to the following textbook for details:
* @book{cormen1990ia,
* title={{Introduction to algorithms}},
* author={Cormen, T.T. and Leiserson, C.E. and Rivest, R.L.},
* year={1990},
* publisher={MIT Press Cambridge, MA, USA}
* }
*/
template <class Tkey>
class MaxHeap{
private:
/// root is assumed to be at end of the vector
vector< pair<Tkey,int> > heap;
/**
* maintain a list of back indexes.
* * -1 not in heap
* * other index that point to cell in vector heap
*/
vector< int > backIdx;
/**
* If useBackIdx==false it means that the current structure
* is not making use of a backindexed heap. Thus, no update
* is available
*/
bool useBackIdx;
public:
/// Simple constructor with NO cross updates
MaxHeap(){
useBackIdx = false;
}
/// back indexes constructor used for cross updates
MaxHeap( int Nindex ){
// initialize the back indexes with pseudo-null pointers
backIdx.resize( Nindex, -1 );
useBackIdx = true;
}
/// pushes a new value in the heap
void push( Tkey key, int index ){
//cout << "pushing " << index << endl;
if( useBackIdx && index >= (int) backIdx.size() )
throw InvalidIndexException("the index in the push must be smaller than the maximal allowed index (specified in constructor)");
// If key is not in backindexes or there is no backindexes AT ALL.... complete push (no update)
if( !useBackIdx ){
// add to the back of the vector
heap.push_back( make_pair(key, index) );
// recursive call to increase key
heapIncreaseKey( heap.size()-1, key );
}
else{
if( backIdx[index] == -1 ){
// add to the back of the vector
heap.push_back( make_pair(key, index) );
//initially point to back
backIdx[ index ] = heap.size()-1;
// recursive call to increase key
heapIncreaseKey( heap.size()-1, key );
// USE STL STUFF
//push_heap(heap.begin(),heap.end());
}
// update push (a key exists)
else {
heapIncreaseKey( backIdx[index], key );
}
}
}
/// return a constant reference to the MINIMAL KEY element stored in the head of the heap
const pair<Tkey,int>& top() throw(HeapEmptyException){
if( heap.empty() )
throw new HeapEmptyException("Impossible to get top element, empty heap");
else
return heap[0];
}
/// removes the top element of the queue (minimal)
void pop() throw(HeapEmptyException){
if( heap.size() < 1 ) //a.k.a. heap.empty()
throw new HeapEmptyException("heap underflow");
// overwrite top with tail element
heap[0] = heap.back();
// USE STL FUNCTIONALITIES (NOT ALLOW BACKINDEXs)
//pop_heap(heap.begin(), heap.end());
// shorten the vector
heap.pop_back();
// start heapify from root
maxHeapify(0);
}
/// returns the size of the heap
int size(){
return heap.size();
}
/// check for emptyness
bool empty(){
return heap.empty();
}
/// check recursively if the substructures is correct using STL provided algorithm
bool verifyHeap( ){
return std::__is_heap(heap.begin(), heap.end() );
}
private:
/// check and applies MaxHeap Correctness down the subtree with index "currIdx"
void maxHeapify(int currIdx){
unsigned int leftIdx = LEFT( currIdx );
unsigned int rightIdx = RIGHT( currIdx );
// decide if and where ta swap, left or right, then swap
// current is the best choice (defalut)
int largestIdx;
// is left a better choice? (exists an invalid placed bigger value on the left side)
if( leftIdx < heap.size() && heap[leftIdx].first > heap[currIdx].first )
largestIdx = leftIdx;
else
largestIdx = currIdx;
// is right a better choice? (exists an invalid placed bigger value on the right side)
if( rightIdx < heap.size() && heap[rightIdx].first > heap[largestIdx].first )
largestIdx = rightIdx;
// a better choice exists?
if( largestIdx != currIdx ){
// swap elements
swap( currIdx, largestIdx );
// recursively call this function on alterated subtree
maxHeapify( largestIdx );
}
}
/// swap the content of two elements in position pos1 and pos2
void swap(int pos1, int pos2){
assert( !heap.empty() );
assert( pos1>=0 && pos1<(int)heap.size() );
assert( pos2>=0 && pos2<(int)heap.size() );
// update backindexes
if( useBackIdx ){
backIdx[ heap[pos1].second ] = pos2;
backIdx[ heap[pos2].second ] = pos1;
}
// update heap
pair<Tkey,int> temp = heap[pos1];
heap[pos1] = heap[pos2];
heap[pos2] = temp;
}
/// propagates the correctness (in heap sense) down from a vertex currIdx
void heapIncreaseKey( int currIdx, Tkey key ){
// check if given key update is actually an increase
if( key < heap[currIdx].first )
throw InvalidKeyIncreaseException("In MaxHeaps only increase key updates are legal");
// update value with current key
heap[currIdx].first = key;
// traverse the tree up making necessary swaps
int parentIdx = PARENT(currIdx);
while( currIdx > 0 ){
if( heap[ parentIdx ].first < heap[ currIdx ].first ){
// make swap
swap( currIdx, parentIdx );
// move up
currIdx = parentIdx;
parentIdx = PARENT(currIdx);
} else {
break;
}
}
}
/// print an internal representation of the heap (debug purposes)
void print() {
cout << "idxs";
for ( int i=0; i < size(); i++)
cout << " " << heap[i].second << " ";
cout << endl;
cout << "csts";
for ( int i=0; i < size(); i++)
cout << " " << heap[i].first << " ";
cout << endl;
// cout << "";
// for ( int i=0; i < size(); i++)
// cout << heap[i].first << " in off: " << backIdx[heap[i].first] << ", ";
// cout << endl;
cout << endl;
}
};
/**
 * This class provides a back-indexed heap (MinHeap) structure where indexes of
* elements already in the heap are kept updated to allow for random access
* update of elements (done automatically in push if element with
* "idx" is already contained in the heap )
*
* Refer to the following textbook for details:
* @book{cormen1990ia,
* title={{Introduction to algorithms}},
* author={Cormen, T.T. and Leiserson, C.E. and Rivest, R.L.},
* year={1990},
* publisher={MIT Press Cambridge, MA, USA}
* }
*/
template <class Tkey>
class MinHeap{
private:
/// root is assumed to be at end of the vector
vector< pair<Tkey,int> > heap;
/**
* maintain a list of back indexes.
* * -1 not in heap
* * other index that point to cell in vector heap
*/
vector< int > backIdx;
/**
* If useBackIdx==false it means that the current structure
* is not making use of a backindexed heap. Thus, no update
* is available
*/
bool useBackIdx;
public:
/// back indexes constructor used for cross updates
MinHeap( int Nindex ){
// initialize the back indexes with pseudo-null pointers
backIdx.resize( Nindex, -1 );
useBackIdx = true;
}
/// Simple constructor with NO cross updates
MinHeap(){
useBackIdx = false;
}
/// pushes a new value in the heap
void push( Tkey key, int index ){
//cout << "pushing " << index << endl;
if( useBackIdx && index >= (int) backIdx.size() )
throw InvalidIndexException("the index in the push must be smaller than the maximal allowed index (specified in constructor)");
// If key is not in backindexes or there is no backindexes AT ALL.... complete push (no update)
if( !useBackIdx ){
// add to the back of the vector
heap.push_back( make_pair(key, index) );
// recursive call to increase key
heapDecreaseKey( heap.size()-1, key );
}
else{
if( useBackIdx || backIdx[index] == -1 ){
// add to the back of the vector
heap.push_back( make_pair(key, index) );
//initially point to back
backIdx[ index ] = heap.size()-1;
// recursive call to increase key
heapDecreaseKey( heap.size()-1, key );
// USE STL STUFF
//push_heap(heap.begin(),heap.end());
}
// update push (a key exists)
else {
heapDecreaseKey( backIdx[index], key );
}
}
}
/// return a constant reference to the MINIMAL KEY element stored in the head of the heap
const pair<Tkey,int>& top() throw(HeapEmptyException){
if( heap.empty() )
throw new HeapEmptyException("Impossible to get top element, empty heap");
else
return heap[0];
}
/// removes the top element of the queue (minimal)
void pop() throw(HeapEmptyException){
if( heap.size() < 1 ) //a.k.a. heap.empty()
throw new HeapEmptyException("heap underflow");
// overwrite top with tail element
heap[0] = heap.back();
// USE STL FUNCTIONALITIES (NOT ALLOW BACKINDEXs)
//pop_heap(heap.begin(), heap.end());
// shorten the vector
heap.pop_back();
// start heapify from root
minHeapify(0);
}
/// returns the size of the heap
int size(){
return heap.size();
}
/// check for emptyness
bool empty(){
return heap.empty();
}
// this does not work, how do you provide a new ordering function to is_heap??
/// check recursively if the substructures is correct using STL provided algorithm
//bool verifyHeap( ){
// return std::__is_heap(heap.begin(), heap.end() );
//}
/// computes full heap sort and returns the corresponding indexing structure
/// Requires the indexes to be allocated already.
void heapsort(vector<int>& indexes){
// until empty... keep popping
int i = 0;
while( empty() == false ){
pair<Tkey,int> t = top();
pop();
indexes[i++] = t.second;
}
}
private:
/// check and applies MaxHeap Correctness down the subtree with index "currIdx"
void minHeapify(int currIdx){
unsigned int leftIdx = LEFT( currIdx );
unsigned int rightIdx = RIGHT( currIdx );
// decide if and where ta swap, left or right, then swap
// current is the best choice (defalut)
int smallerIdx;
// is left a better choice? (exists an invalid placed smaller value on the left side)
if( leftIdx < heap.size() && heap[leftIdx].first < heap[currIdx].first )
smallerIdx = leftIdx;
else
smallerIdx = currIdx;
// is right a better choice? (exists an invalid placed smaller value on the right side)
if( rightIdx < heap.size() && heap[rightIdx].first < heap[smallerIdx].first )
smallerIdx = rightIdx;
// a better choice exists?
if( smallerIdx != currIdx ){
// swap elements
swap( currIdx, smallerIdx );
// recursively call this function on alterated subtree
minHeapify( smallerIdx );
}
}
/// swap the content of two elements in position pos1 and pos2
void swap(int pos1, int pos2){
assert( !heap.empty() );
assert( pos1>=0 && pos1<(int)heap.size() );
assert( pos2>=0 && pos2<(int)heap.size() );
// update backindexes
if( useBackIdx ){
backIdx[ heap[pos1].second ] = pos2;
backIdx[ heap[pos2].second ] = pos1;
}
// update heap
pair<Tkey,int> temp = heap[pos1];
heap[pos1] = heap[pos2];
heap[pos2] = temp;
}
/// propagates the correctness (in heap sense) down from a vertex currIdx
void heapDecreaseKey( int currIdx, Tkey key ){
// check if given key update is actually an increase
if( key > heap[currIdx].first )
throw InvalidKeyIncreaseException("In MinHeaps only decrease in key updates are legal");
// update value with current key
heap[currIdx].first = key;
// traverse the tree up making necessary swaps
int parentIdx = PARENT(currIdx);
while( currIdx > 0 ){
if( heap[ parentIdx ].first > heap[ currIdx ].first ){
// make swap
swap( currIdx, parentIdx );
// move up
currIdx = parentIdx;
parentIdx = PARENT(currIdx);
} else {
break;
}
}
}
/// print an internal representation of the heap (debug purposes)
public: void print() {
cout << "idxs";
for ( int i=0; i < size(); i++)
cout << " " << heap[i].second << " ";
cout << endl;
cout << "csts";
for ( int i=0; i < size(); i++)
cout << " " << heap[i].first << " ";
cout << endl;
// cout << "";
// for ( int i=0; i < size(); i++)
// cout << heap[i].first << " in off: " << backIdx[heap[i].first] << ", ";
// cout << endl;
cout << endl;
}
};
#endif /*MYHEAPS_H_*/
| Unknown |
3D | stillbreeze/3D-reconstruction-Using-SfM-and-Stereo-Matching | code/SFMedu/denseMatch/priority_queue_1.0_old/pq_push.cpp | .cpp | 2,968 | 91 | //==============================================================================
// Name : pq_push.cpp
// Author : Andrea Tagliasacchi
// Version : 1.0
// Copyright : 2009 (c) Andrea Tagliasacchi
// Description : inserts a new element in the priority queue
//
// May 22, 2009: Created
//==============================================================================
#include "MyHeap.h"
//------------------------------- MATLAB -------------------------------------//
// printf alias for debug output in the MEX context
#define toSysout(...) printf(__VA_ARGS__)
// print a formatted error message on stdout and abort the process
// (do { ... } while(0) makes the macro a single statement)
#define exit_with_error(...) \
	do { \
		fprintf(stdout, "Error: "); \
		fprintf(stdout, __VA_ARGS__ ); \
		fprintf(stdout, "\n" ); \
		exit(1); \
	} while(0)
#ifdef MATLAB_MEX_FILE
#include "mex.h"
/// Decodes the MaxHeap pointer, previously handed to MATLAB encoded as a
/// double scalar (first vararg), back into a usable C++ pointer.
/// Aborts the MEX call with an error message on any malformed input.
void retrieve_heap( const mxArray* matptr, MaxHeap<double>* & heap){
	// retrieve pointer from the MX form
	double* pointer0 = mxGetPr(matptr);
	// check that I actually received something
	if( pointer0 == NULL )
		mexErrMsgTxt("vararg{1} must be a valid priority queue pointer\n");
	// convert it to "long" datatype (good for addresses)
	// NOTE(review): the double -> long -> pointer round-trip assumes
	// sizeof(long) can hold a pointer; this breaks on LLP64 platforms
	// (64-bit Windows, where long is 32-bit) — confirm target, or use intptr_t
	long pointer1 = (long) pointer0[0];
	// convert it to "KDTree"
	heap = (MaxHeap<double>*) pointer1;
	// check that I actually received something
	if( heap == NULL )
		mexErrMsgTxt("vararg{1} must be a valid priority queue pointer\n");
}
/// Reads the second MEX argument as an integer element index.
/// Aborts the MEX call with an error message on any malformed input.
void retrieve_index( const mxArray* matptr, int& index){
	// check that I actually received something
	if( matptr == NULL )
		mexErrMsgTxt("missing second parameter (element index)\n");
	if( 1 != mxGetM(matptr) || !mxIsNumeric(matptr) || 1 != mxGetN(matptr) )
		mexErrMsgTxt("second parameter should be a unique integer array index\n");
	// BUGFIX: validate integrality BEFORE truncating to int — the old test
	// `index % 1 != 0` ran after the cast and could never fire
	double value = mxGetScalar(matptr);
	if( value != (double)(int)value )
		mexErrMsgTxt("the index should have been an integer!\n");
	index = (int) value;
}
/// Reads the third MEX argument as a scalar cost (the heap key).
/// Aborts the MEX call with an error message on any malformed input.
void retrieve_cost( const mxArray* matptr, double& cost){
	// check that I actually received something
	// BUGFIX: error messages used to refer to the "element index" /
	// "second parameter" — this function handles the third (cost) argument
	if( matptr == NULL )
		mexErrMsgTxt("missing third parameter (element cost)\n");
	if( 1 != mxGetM(matptr) || !mxIsNumeric(matptr) || 1 != mxGetN(matptr) )
		mexErrMsgTxt("third parameter should be a scalar cost value\n");
	// retrieve cost
	cost = (double) mxGetScalar(matptr);
}
/// MEX entry point: pq_push(heap_ptr, index, cost).
/// Inserts (or increase-key updates) element `index` with key `cost` into
/// the MaxHeap whose address is encoded in the first argument.
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]){
	// validate argument count and presence
	if( nrhs!=3 )
		mexErrMsgTxt("This function requires 3 arguments\n");
	if( !mxIsNumeric(prhs[0]) )
		mexErrMsgTxt("parameter 1 missing!\n");
	if( !mxIsNumeric(prhs[1]) )
		mexErrMsgTxt("parameter 2 missing!\n");
	if( !mxIsNumeric(prhs[2]) )
		mexErrMsgTxt("parameter 3 missing!\n");
	// retrieve the heap
	MaxHeap<double>* heap;
	retrieve_heap( prhs[0], heap);
	// retrieve the parameters
	int index;
	retrieve_index( prhs[1], index );
	double cost;
	retrieve_cost( prhs[2], cost);
	// push in the PQ (index-1: MATLAB is 1-based, the heap is 0-based)
	heap->push( cost, index-1 );
	// return control to matlab
	return;
}
#endif
| C++ |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.