repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
XFL | XFL-master/python/algorithm/model/densenet.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This model contains a PyTorch implementation of the paper "Densely Connected Convolutional Networks."[1]
# [1]Huang, G., Liu, Z., Weinberger, K. Q., & van der Maaten, L. (2016). Densely connected convolutional networks. arXiv preprint arXiv:1608.06993.
from collections import OrderedDict
import math
import torch.nn as nn
import torch
import torch.nn.functional as F
class BottleNeckBlock(nn.Module):
    """DenseNet bottleneck layer: BN-ReLU-Conv1x1 followed by BN-ReLU-Conv3x3.

    The input is concatenated with the produced growth_rate feature maps, so
    the output has in_planes + growth_rate channels.
    """

    # The 1x1 convolution widens to expansion * growth_rate channels before the 3x3.
    expansion = 4

    def __init__(self, in_planes, growth_rate, drop_out=0.0):
        super().__init__()
        bottleneck_width = self.expansion * growth_rate
        self.conv_block1 = nn.Sequential(OrderedDict([
            ("batch_norm1", nn.BatchNorm2d(in_planes, track_running_stats=True)),
            ("relu1", nn.ReLU()),
            ("conv1", nn.Conv2d(in_planes, bottleneck_width,
                                kernel_size=1, stride=1, bias=False)),
        ]))
        self.conv_block2 = nn.Sequential(OrderedDict([
            ("batch_norm2", nn.BatchNorm2d(bottleneck_width, track_running_stats=True)),
            ("relu2", nn.ReLU()),
            ("conv2", nn.Conv2d(bottleneck_width, growth_rate,
                                kernel_size=3, stride=1, padding=1, bias=False)),
        ]))
        self.drop_out = drop_out

    def forward(self, x):
        h = self.conv_block1(x)
        if self.drop_out:
            h = F.dropout(h, p=self.drop_out, training=self.training)
        h = self.conv_block2(h)
        if self.drop_out:
            h = F.dropout(h, p=self.drop_out, training=self.training)
        # Dense connectivity: stack input and new features along the channel axis.
        return torch.cat([x, h], 1)
class TransitionBlock(nn.Module):
    """Compression layer between dense blocks: BN-ReLU-Conv1x1, optional
    dropout, then 2x2 average pooling that halves the spatial resolution."""

    def __init__(self, in_planes, out_planes, drop_out=0.0):
        super().__init__()
        self.bn = nn.BatchNorm2d(in_planes)
        self.relu = nn.ReLU()
        self.conv = nn.Conv2d(in_planes, out_planes,
                              kernel_size=1, stride=1, padding=0, bias=False)
        self.drop_out = drop_out

    def forward(self, x):
        h = self.bn(x)
        h = self.relu(h)
        h = self.conv(h)
        if self.drop_out:
            h = F.dropout(h, p=self.drop_out, training=self.training)
        return F.avg_pool2d(h, 2)
class Densenet(nn.Module):
    """DenseNet backbone: four dense blocks separated by transition layers,
    followed by BN-ReLU, global average pooling and a linear classifier.

    Args:
        block: dense-layer class to stack (e.g. BottleNeckBlock).
        block_list: number of dense layers in each of the four blocks.
        num_classes: output size of the final linear layer.
        growth_rate: channels added by every dense layer.
        reduction: channel-compression factor applied by each transition layer.
        drop_out: dropout probability forwarded to the dense layers.
    """
    def __init__(self, block, block_list, num_classes, growth_rate=12, reduction=0.5, drop_out=0.0):
        super().__init__()
        self.growth_rate = growth_rate
        self.drop_out = drop_out
        # Stem: a 3x3 conv producing 2 * growth_rate channels from RGB input.
        in_planes = 2 * growth_rate
        self.conv = nn.Conv2d(3, in_planes, kernel_size=3, padding=1, bias=False)
        # Each dense block adds block_list[i] * growth_rate channels; each
        # transition then compresses the channel count by `reduction`.
        self.dense_layer1 = self._make_layers(block, block_list[0], in_planes)
        in_planes += block_list[0]*growth_rate
        self.transition1 = TransitionBlock(in_planes, int(math.floor(in_planes*reduction)), drop_out=drop_out)
        in_planes = int(math.floor(in_planes*reduction))
        self.dense_layer2 = self._make_layers(block, block_list[1], in_planes)
        in_planes += block_list[1]*growth_rate
        self.transition2 = TransitionBlock(in_planes, int(math.floor(in_planes*reduction)), drop_out=drop_out)
        in_planes = int(math.floor(in_planes*reduction))
        self.dense_layer3 = self._make_layers(block, block_list[2], in_planes)
        in_planes += block_list[2]*growth_rate
        self.transition3 = TransitionBlock(in_planes, int(math.floor(in_planes*reduction)), drop_out=drop_out)
        in_planes = int(math.floor(in_planes*reduction))
        # The last dense block is not followed by a transition layer.
        self.dense_layer4 = self._make_layers(block, block_list[3], in_planes)
        in_planes += block_list[3]*growth_rate
        self.batchnorm = nn.BatchNorm2d(in_planes)
        self.relu = nn.ReLU()
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(in_planes, num_classes)

    def forward(self, x):
        """Return class logits of shape (N, num_classes) for input images x."""
        x = self.conv(x)
        x = self.transition1(self.dense_layer1(x))
        x = self.transition2(self.dense_layer2(x))
        x = self.transition3(self.dense_layer3(x))
        x = self.dense_layer4(x)
        x = self.relu(self.batchnorm(x))
        x = self.avgpool(x)
        # Flatten (N, C, 1, 1) -> (N, C) before the classifier.
        x = x.view(x.shape[0], -1)
        x = self.fc(x)
        return x

    def _make_layers(self, block, blocks, in_planes):
        """Stack `blocks` dense layers; the input width grows by growth_rate per layer."""
        layers = []
        for i in range(blocks):
            layers.append(block(in_planes, self.growth_rate, drop_out=self.drop_out))
            in_planes += self.growth_rate
        return nn.Sequential(*layers)
def DenseNet(num_classes, layers):
    """Factory for standard DenseNet depths.

    Args:
        num_classes: output size of the classifier head.
        layers: one of 121, 169, 201, 264 or the string 'unit_test'.

    Raises:
        NotImplementedError: for any unsupported `layers` value.
    """
    configs = {
        121: ([6, 12, 24, 16], 32),
        169: ([6, 12, 32, 32], 32),
        201: ([6, 12, 48, 32], 32),
        264: ([6, 12, 64, 48], 32),
        'unit_test': ([2, 2, 2, 2], 8),
    }
    try:
        block_list, growth_rate = configs[layers]
    except KeyError:
        raise NotImplementedError("Only support DenseNet121, DenseNet169, DenseNet201, DenseNet264 currently, please change layers") from None
    return Densenet(BottleNeckBlock, block_list, num_classes, growth_rate=growth_rate)
| 5,629 | 41.651515 | 147 | py |
XFL | XFL-master/python/algorithm/model/resnet_paddle.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This model contains a PaddlePaddle implementation of the paper "Deep Residual Learning for Image Recognition."[1]
# [1]He, K., Zhang, X., Ren, S., & Sun, J. (2016). Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition (pp. 770-778).
from collections import OrderedDict
import paddle
from paddle import nn
class ConvBlock(nn.Layer):
    """Bottleneck residual block (1x1 -> 3x3 -> 1x1 convolutions) for paddle ResNet."""

    # Channel multiplier of the final 1x1 convolution.
    expansion = 4

    def __init__(self, in_channels, out_channels, downsample=None, stride=1):
        super().__init__()
        width = out_channels
        named_layers = [
            ("conv1", nn.Conv2D(in_channels, width,
                                kernel_size=1, stride=1, padding=0)),
            ("batch_norm1", nn.BatchNorm2D(width)),
            ("relu1", nn.ReLU()),
            ("conv2", nn.Conv2D(width, width,
                                kernel_size=3, stride=stride, padding=1)),
            ("batch_norm2", nn.BatchNorm2D(width)),
            ("relu2", nn.ReLU()),
            ("conv3", nn.Conv2D(width, width * self.expansion,
                                kernel_size=1, stride=1, padding=0)),
            ("batch_norm3", nn.BatchNorm2D(width * self.expansion)),
        ]
        self.stem = nn.Sequential(*named_layers)
        # Optional projection that matches the residual's shape to the output.
        self.downsample = downsample
        self.stride = stride
        self.relu = nn.ReLU()

    def forward(self, x):
        identity = x if self.downsample is None else self.downsample(x)
        out = self.stem(x)
        out = out + identity
        return self.relu(out)
class Resnet(nn.Layer):
    """Paddle ResNet built from bottleneck blocks.

    Args:
        ResBlock: residual block class (e.g. ConvBlock, expansion == 4).
        block_list: number of blocks in each of the four stages.
        num_classes: output size of the final linear layer.
    """
    def __init__(self, ResBlock, block_list, num_classes):
        super().__init__()
        # Stem uses a 3x3 conv (stride 1) — suited to small inputs such as 32x32.
        self.stem = nn.Sequential(
            ("conv1", nn.Conv2D(3, 64, kernel_size=3, stride=1, padding=1, bias_attr=False)),
            ("batch_norm1", nn.BatchNorm2D(64)),
            ("relu", nn.ReLU())
        )
        self.max_pool = nn.MaxPool2D(kernel_size=3, stride=2, padding=1)
        # Each stage's inplanes equals the previous stage's outplanes * expansion.
        self.layers1 = self._make_layers(
            ResBlock, block_list[0], inplanes=64, outplanes=64, stride=1)
        self.layers2 = self._make_layers(
            ResBlock, block_list[1], inplanes=256, outplanes=128, stride=2)
        self.layers3 = self._make_layers(
            ResBlock, block_list[2], inplanes=512, outplanes=256, stride=2)
        self.layers4 = self._make_layers(
            ResBlock, block_list[3], inplanes=1024, outplanes=512, stride=2)
        self.avgpool = nn.AdaptiveAvgPool2D((1, 1))
        self.fc = nn.Linear(512*ResBlock.expansion, num_classes)

    def forward(self, x):
        """Return class logits of shape (N, num_classes)."""
        x = self.stem(x)
        x = self.max_pool(x)
        x = self.layers1(x)
        x = self.layers2(x)
        x = self.layers3(x)
        x = self.layers4(x)
        x = self.avgpool(x)
        # Flatten (N, C, 1, 1) -> (N, C) before the classifier.
        x = x.reshape([x.shape[0], -1])
        x = self.fc(x)
        return x

    def _make_layers(self, ResBlock, blocks, inplanes, outplanes, stride=1):
        """Build one stage: first block may downsample/project, the rest keep shape."""
        layers =[]
        downsample = None
        # A projection shortcut is needed when the spatial size or channel
        # count changes across the stage boundary.
        if stride != 1 or inplanes != outplanes*ResBlock.expansion:
            downsample = nn.Sequential(
                nn.Conv2D(inplanes, outplanes*ResBlock.expansion,
                          kernel_size=1, stride=stride),
                nn.BatchNorm2D(outplanes*ResBlock.expansion)
            )
        layers.append(ResBlock(inplanes, outplanes,
                               downsample=downsample, stride=stride))
        for i in range(1, blocks):
            layers.append(ResBlock(outplanes*ResBlock.expansion, outplanes))
        return nn.Sequential(*layers)
def ResNet(num_classes, layers):
    """Factory for standard paddle ResNet depths.

    Args:
        num_classes: output size of the classifier head.
        layers: one of 50, 101, 152 or the string "unit_test".

    Raises:
        NotImplementedError: for any unsupported `layers` value.
    """
    depth_to_blocks = {
        "unit_test": [2, 2, 2, 2],
        50: [3, 4, 6, 3],
        101: [3, 4, 23, 3],
        152: [3, 8, 36, 3],
    }
    if layers not in depth_to_blocks:
        raise NotImplementedError("Only support ResNet50, ResNet101, ResNet152 currently, please change layers")
    return Resnet(ConvBlock, depth_to_blocks[layers], num_classes)
| 4,619 | 36.868852 | 192 | py |
XFL | XFL-master/python/algorithm/model/linear_regression.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
class LinearRegression(nn.Module):
    """Single-output linear regression: y = Wx (+ b when bias=True)."""

    def __init__(self, input_dim: int, bias: bool = False):
        super().__init__()
        self.linear = nn.Linear(input_dim, 1, bias=bias)

    def forward(self, x):
        """Return predictions of shape (N, 1) for features x of shape (N, input_dim)."""
        return self.linear(x)
| 907 | 32.62963 | 74 | py |
XFL | XFL-master/python/algorithm/model/poisson_regression.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
class PoissonRegression(nn.Module):
    """Poisson regression with log link: y = exp(Wx (+ b)), so outputs are positive."""

    def __init__(self, input_dim: int, bias: bool = False):
        super().__init__()
        self.linear = nn.Linear(input_dim, 1, bias=bias)

    def forward(self, x):
        """Return positive rate predictions of shape (N, 1)."""
        return self.linear(x).exp()
| 917 | 33 | 74 | py |
XFL | XFL-master/python/algorithm/model/logistic_regression.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch.nn as nn
class LogisticRegression(nn.Module):
    """Binary logistic regression: sigmoid(Wx (+ b)), output probabilities in (0, 1)."""

    def __init__(self, input_dim: int, bias: bool = False):
        super().__init__()
        self.linear = nn.Linear(input_dim, 1, bias=bias)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        """Return probabilities of shape (N, 1) for features x of shape (N, input_dim)."""
        return self.sigmoid(self.linear(x))
| 954 | 31.931034 | 74 | py |
XFL | XFL-master/python/algorithm/model/__init__.py | 0 | 0 | 0 | py | |
XFL | XFL-master/python/algorithm/framework/__init__.py | 0 | 0 | 0 | py | |
XFL | XFL-master/python/algorithm/framework/horizontal/__init__.py | 0 | 0 | 0 | py | |
XFL | XFL-master/python/algorithm/framework/horizontal/poisson_regression/label_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from algorithm.core.horizontal.template.agg_type import \
register_agg_type_for_label_trainer
from common.utils.logger import logger
from .common import Common
class HorizontalPoissonRegressionLabelTrainer(Common):
    """Label-trainer role for horizontal federated Poisson regression."""

    def __init__(self, train_conf: dict):
        super().__init__(train_conf)
        # The aggregation method name is the single key of the "method" mapping.
        agg_type = next(iter(self.common_config.aggregation["method"]))
        self.register_hook(
            place="after_train_loop", rank=1,
            func=partial(self.val_loop, "train"), desc="validation on trainset"
        )
        register_agg_type_for_label_trainer(self, 'torch', agg_type)

    def train_loop(self):
        """Run one local training epoch and record the mean batch loss."""
        self.model.train()
        lossfunc = next(iter(self.lossfunc.values()))
        optimizer = next(iter(self.optimizer.values()))
        schedulers = list(self.lr_scheduler.values())
        lr_scheduler = schedulers[0] if schedulers else None
        train_loss = 0
        for feature, label in self.train_dataloader:
            optimizer.zero_grad()
            loss = lossfunc(self.model(feature), label)
            loss.backward()
            optimizer.step()
            train_loss += loss.item()
        train_loss /= len(self.train_dataloader)
        if lr_scheduler:
            lr_scheduler.step()
        self.context["train_loss"] = train_loss
        logger.info(f"Train loss: {train_loss}")
XFL | XFL-master/python/algorithm/framework/horizontal/poisson_regression/assist_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from .common import Common
from algorithm.core.horizontal.template.agg_type import register_agg_type_for_assist_trainer
class HorizontalPoissonRegressionAssistTrainer(Common):
    """Assist-trainer (aggregator) role for horizontal federated Poisson regression."""

    def __init__(self, train_conf: dict):
        super().__init__(train_conf)
        agg_type = next(iter(self.common_config.aggregation["method"]))
        register_agg_type_for_assist_trainer(self, 'torch', agg_type)
        # Checkpoint and validate after each local epoch; save the final model
        # once all global epochs complete.
        hooks = (
            ("after_local_epoch", 1, partial(self._save_model, False), "save model "),
            ("after_local_epoch", 2, partial(self.val_loop, "val"), "validation on valset"),
            ("after_global_epoch", 1, partial(self._save_model, True), "save final model"),
        )
        for place, rank, func, desc in hooks:
            self.register_hook(place=place, rank=rank, func=func, desc=desc)

    def train_loop(self):
        # The aggregator performs no local training.
        pass
| 1,539 | 43 | 92 | py |
XFL | XFL-master/python/algorithm/framework/horizontal/poisson_regression/common.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset
from algorithm.core.data_io import CsvReader
from algorithm.core.horizontal.template.torch.base import BaseTrainer
from algorithm.model.poisson_regression import PoissonRegression
from common.checker.x_types import All
from common.evaluation.metrics import CommonMetrics
from common.utils.config_sync import ConfigSynchronizer
from common.utils.logger import logger
class Common(BaseTrainer):
    """Shared trainer logic for horizontal federated Poisson regression:
    config synchronization, model/data setup and the validation loop used by
    both the label trainer and the assist trainer.
    """

    def __init__(self, train_conf: dict):
        # These config sections are synchronized across parties before training
        # so every participant runs with identical hyper-parameters.
        sync_rule = {
            "model_info": {
                "config": All()
            },
            "train_info": {
                "interaction_params": All(),
                "train_params": {
                    "global_epoch": All(),
                    "aggregation": All(),
                    "encryption": All(),
                    "optimizer": All(),
                    "lr_scheduler": All(),
                    "lossfunc": All(),
                    "metric": All(),
                    "early_stopping": All()
                }
            }
        }
        train_conf = ConfigSynchronizer(train_conf).sync(sync_rule)
        super().__init__(train_conf)

    def _set_model(self) -> nn.Module:
        """Build the PoissonRegression model from model_info.config."""
        model_config = self.common_config.model_info.get("config")
        model = PoissonRegression(input_dim=model_config["input_dim"],
                                  bias=model_config["bias"])
        return model

    def _read_data(self, input_dataset):
        """Return a CsvReader for the first configured dataset, or None when
        nothing is configured or the dataset type is not csv."""
        if len(input_dataset) == 0:
            return None
        conf = input_dataset[0]
        if conf["type"] == "csv":
            path = os.path.join(conf['path'], conf['name'])
            has_label = conf["has_label"]
            has_id = conf['has_id']
            return CsvReader(path, has_id, has_label)
        else:
            return None

    def _set_train_dataloader(self):
        """Build a shuffled DataLoader over the train set (float32 features,
        labels unsqueezed to (N, 1)); returns None when no train set exists."""
        train_data = self._read_data(self.common_config.input_trainset)
        trainset = None
        train_dataloader = None
        if train_data:
            trainset = TensorDataset(
                torch.tensor(train_data.features(), dtype=torch.float32).to(self.device),
                torch.tensor(train_data.label(), dtype=torch.float32).unsqueeze(dim=-1).to(self.device)
            )
            batch_size = self.common_config.train_params.get("train_batch_size")
        if trainset:
            train_dataloader = DataLoader(trainset, batch_size, shuffle=True)
        return train_dataloader

    def _set_val_dataloader(self):
        """Build a DataLoader over the validation set; None when not configured."""
        val_data = self._read_data(self.common_config.input_valset)
        valset = None
        val_dataloader = None
        if val_data:
            valset = TensorDataset(
                torch.tensor(val_data.features(), dtype=torch.float32).to(self.device),
                torch.tensor(val_data.label(), dtype=torch.float32).unsqueeze(dim=-1).to(self.device)
            )
            batch_size = self.common_config.train_params.get("val_batch_size")
        if valset:
            val_dataloader = DataLoader(valset, batch_size, shuffle=True)
        return val_dataloader

    def val_loop(self, dataset_type: str = "val", context: dict = {}):
        """Evaluate the model on the train or val set, save metrics and drive
        early stopping.

        Args:
            dataset_type: "val" or "train" — which dataloader to evaluate.
            context: unused here; kept for hook-signature compatibility.
              NOTE(review): mutable default argument — harmless while unused.

        Raises:
            ValueError: if dataset_type is neither "val" nor "train".
        """
        self.model.eval()
        val_loss = 0
        val_predicts = []
        labels = []
        lossfunc_name = list(self.lossfunc.keys())[0]
        lossfunc = list(self.lossfunc.values())[0]
        if dataset_type == "val":
            dataloader = self.val_dataloader
        elif dataset_type == "train":
            dataloader = self.train_dataloader
        else:
            raise ValueError(f"dataset type {dataset_type} is not valid.")
        for batch, (feature, label) in enumerate(dataloader):
            pred = self.model(feature)
            loss = lossfunc(pred, label)
            val_predicts.append(pred.detach().cpu().squeeze(-1).numpy())
            val_loss += loss.item()
            labels.append(label.cpu().squeeze(-1).numpy())
        val_loss /= len(dataloader)
        labels: np.ndarray = np.concatenate(labels, axis=0)
        val_predicts: np.ndarray = np.concatenate(val_predicts, axis=0)
        metrics_output = CommonMetrics._calc_metrics(
            metrics=self.metrics,
            labels=labels,
            val_predicts=val_predicts,
            lossfunc_name=lossfunc_name,
            loss=val_loss,
            dataset_type=dataset_type
        )
        global_epoch = self.context["g_epoch"]
        # Local epoch is only meaningful for the in-training ("train") evaluation.
        if dataset_type == "val":
            local_epoch = None
        elif dataset_type == "train":
            local_epoch = self.context["l_epoch"]
        CommonMetrics.save_metric_csv(
            metrics_output=metrics_output,
            output_config=self.common_config.output,
            global_epoch=global_epoch,
            local_epoch=local_epoch,
            dataset_type=dataset_type,
        )
        early_stop_flag = self.context["early_stop_flag"]
        # Early stopping is only evaluated on the val set when checkpointing is
        # on and patience is positive.
        # NOTE(review): `&` is bitwise, not logical `and`; it works here because
        # the operands are plain booleans.
        if (self.common_config.save_frequency > 0) & \
                (dataset_type == "val") & (self.earlystopping.patience > 0):
            early_stop_flag = self.earlystopping(metrics_output, global_epoch)
            if early_stop_flag:
                # find the saved epoch closest to the best epoch
                best_epoch = self.earlystopping.best_epoch
                closest_epoch = round(best_epoch / self.common_config.save_frequency) * \
                    self.common_config.save_frequency
                closest_epoch -= self.common_config.save_frequency \
                    if closest_epoch > global_epoch else 0
                self.context["early_stop_flag"] = True
                self.context["early_stop_epoch"] = closest_epoch
XFL | XFL-master/python/algorithm/framework/horizontal/vgg/label_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from algorithm.core.horizontal.template.agg_type import \
register_agg_type_for_label_trainer
from common.utils.logger import logger
from .common import Common
from functools import partial
class HorizontalVggLabelTrainer(Common):
    """Label trainer for horizontal federated VGG training."""

    def __init__(self, train_conf: dict):
        super().__init__(train_conf)
        method_conf = self.common_config.aggregation["method"]
        agg_type = list(method_conf.keys())[0]
        # Evaluate on the train set right after each local training loop.
        self.register_hook(
            place="after_train_loop", rank=1,
            func=partial(self.val_loop, "train"), desc="validation on trainset"
        )
        register_agg_type_for_label_trainer(self, 'torch', agg_type)

    def train_loop(self):
        """Run one local epoch of supervised training; store the mean loss in context."""
        self.model.train()
        lossfunc = list(self.lossfunc.values())[0]
        optimizer = list(self.optimizer.values())[0]
        lr_scheduler = list(self.lr_scheduler.values())[0] if self.lr_scheduler.values() else None
        batch_losses = []
        for feature, label in self.train_dataloader:
            pred = self.model(feature)
            loss = lossfunc(pred, label)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            batch_losses.append(loss.item())
        train_loss = sum(batch_losses) / len(self.train_dataloader)
        if lr_scheduler:
            lr_scheduler.step()
        self.context["train_loss"] = train_loss
        logger.info(f"Train loss: {train_loss}")
| 2,018 | 37.09434 | 98 | py |
XFL | XFL-master/python/algorithm/framework/horizontal/vgg/assist_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from .common import Common
from algorithm.core.horizontal.template.agg_type import \
register_agg_type_for_assist_trainer
class HorizontalVggAssistTrainer(Common):
    """Assist trainer (aggregator side) for horizontal federated VGG."""

    def __init__(self, train_conf: dict):
        super().__init__(train_conf)
        aggregation_method = list(self.common_config.aggregation["method"].keys())[0]
        register_agg_type_for_assist_trainer(self, 'torch', aggregation_method)
        # Persist a checkpoint, then validate, after every local epoch.
        self.register_hook(place="after_local_epoch", rank=1,
                           func=partial(self._save_model, False), desc="save model ")
        self.register_hook(place="after_local_epoch", rank=2,
                           func=partial(self.val_loop, "val"), desc="validation on valset")
        # Write the final model once all global epochs have finished.
        self.register_hook(place="after_global_epoch", rank=1,
                           func=partial(self._save_model, True), desc="save final model")

    def train_loop(self):
        # The assist trainer performs no local training.
        pass
| 1,538 | 41.75 | 91 | py |
XFL | XFL-master/python/algorithm/framework/horizontal/vgg/common.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset
from PIL import Image
import torchvision.transforms as transforms
from algorithm.core.data_io import CsvReader, NpzReader
from algorithm.model.vgg import VGG
from common.utils.logger import logger
from algorithm.core.horizontal.template.torch.base import BaseTrainer
from common.utils.config_sync import ConfigSynchronizer
from common.checker.x_types import All
from common.evaluation.metrics import CommonMetrics
class Common(BaseTrainer):
    """Shared trainer logic for horizontal federated VGG: config sync,
    model/data setup and the validation loop used by both trainer roles.
    """

    def __init__(self, train_conf: dict) -> None:
        # These config sections are synchronized across parties before training
        # so every participant runs with identical hyper-parameters.
        sync_rule = {
            "model_info": All(),
            "train_info": {
                "interaction_params": All(),
                "train_params": {
                    "global_epoch": All(),
                    "aggregation": All(),
                    "encryption": All(),
                    "optimizer": All(),
                    "lr_scheduler": All(),
                    "lossfunc": All(),
                    "metric": All(),
                    "early_stopping": All()
                }
            }
        }
        train_conf = ConfigSynchronizer(train_conf).sync(sync_rule)
        super().__init__(train_conf)

    def _set_model(self) -> nn.Module:
        """Instantiate the VGG described by model_info.config and move it to the device."""
        model_config = self.common_config.model_info.get("config")
        model = VGG(num_classes=model_config["num_classes"], layers=model_config["layers"])
        model = model.to(self.device)
        return model

    def _read_data(self, input_dataset):
        """Return a reader for the first configured dataset (csv or npz), or None."""
        if len(input_dataset) == 0:
            return None
        conf = input_dataset[0]
        if conf["type"] == "csv":
            path = os.path.join(conf['path'], conf['name'])
            has_label = conf["has_label"]
            has_id = conf['has_id']
            return CsvReader(path, has_id, has_label)
        elif conf["type"] == "npz":
            path = os.path.join(conf['path'], conf['name'])
            return NpzReader(path)
        else:
            return None

    def _set_train_dataloader(self):
        """Build the training DataLoader with augmentation (random crop + flip)."""
        def img_collate_fn(batch):
            # Convert raw uint8 image arrays into augmented, normalized tensors.
            labels = []
            imgs = []
            transform_train = transforms.Compose([
                transforms.RandomCrop(32, padding=4),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                # Normalization constants — presumably CIFAR-10 statistics.
                transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])
            for feature, label in batch:
                img = Image.fromarray(feature.numpy().astype(np.uint8))
                imgs.append(transform_train(img))
                labels.append(label)
            return torch.stack(imgs,0).to(self.device), torch.stack(labels, 0).long().to(self.device)
        train_data = self._read_data(self.common_config.input_trainset)
        trainset = None
        train_dataloader = None
        if train_data:
            # NOTE(review): only the first 100 samples are used — looks like a
            # debugging/unit-test cap; confirm before production use.
            trainset = TensorDataset(
                torch.tensor(train_data.features()[:100]), torch.tensor(train_data.label()[:100])
            )
            batch_size = self.common_config.train_params.get("train_batch_size")
        if trainset:
            train_dataloader = DataLoader(
                trainset, batch_size, shuffle=True, collate_fn=img_collate_fn
            )
        return train_dataloader

    def _set_val_dataloader(self):
        """Build the validation DataLoader (normalization only, no shuffle)."""
        def img_collate_fn(batch):
            labels = []
            imgs = []
            transform_test = transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])
            for feature, label in batch:
                img = Image.fromarray(feature.numpy().astype(np.uint8))
                imgs.append(transform_test(img))
                labels.append(label)
            return torch.stack(imgs,0).to(self.device), torch.stack(labels, 0).long().to(self.device)
        val_data = self._read_data(self.common_config.input_valset)
        valset = None
        val_dataloader = None
        if val_data:
            # NOTE(review): same first-100-samples cap as the train set — confirm intent.
            valset = TensorDataset(
                torch.tensor(val_data.features()[:100]), torch.tensor(val_data.label()[:100])
            )
            batch_size = self.common_config.train_params.get("val_batch_size")
        if valset:
            val_dataloader = DataLoader(
                valset, batch_size, shuffle=False, collate_fn=img_collate_fn
            )
        return val_dataloader

    def val_loop(self, dataset_type: str = "val", context: dict = {}):
        """Evaluate the model on the train or val set, save metrics and drive
        early stopping.

        Args:
            dataset_type: "val" or "train" — which dataloader to evaluate.
            context: unused here; kept for hook-signature compatibility.
              NOTE(review): mutable default argument — harmless while unused.

        Raises:
            ValueError: if dataset_type is neither "val" nor "train".
        """
        self.model.eval()
        val_loss = 0
        val_predicts = []
        labels = []
        lossfunc_name = list(self.lossfunc.keys())[0]
        lossfunc = list(self.lossfunc.values())[0]
        if dataset_type == "val":
            dataloader = self.val_dataloader
        elif dataset_type == "train":
            dataloader = self.train_dataloader
        else:
            raise ValueError(f"dataset type {dataset_type} is not valid.")
        for batch, (feature, label) in enumerate(dataloader):
            pred = self.model(feature)
            loss = lossfunc(pred, label)
            val_predicts.append(pred.detach().cpu().squeeze(-1).numpy())
            val_loss += loss.item()
            labels.append(label.cpu().squeeze(-1).numpy())
        val_loss /= len(dataloader)
        labels: np.ndarray = np.concatenate(labels, axis=0)
        val_predicts: np.ndarray = np.concatenate(val_predicts, axis=0)
        # Turn raw outputs into class predictions: threshold for 1-D scores,
        # argmax over the class axis for 2-D logits.
        if len(val_predicts.shape) == 1:
            val_predicts = np.array(val_predicts > 0.5, dtype=np.int32)
        elif len(val_predicts.shape) == 2:
            val_predicts = val_predicts.argmax(axis=-1)
        metrics_output = CommonMetrics._calc_metrics(
            metrics=self.metrics,
            labels=labels,
            val_predicts=val_predicts,
            lossfunc_name=lossfunc_name,
            loss=val_loss,
            dataset_type=dataset_type
        )
        global_epoch = self.context["g_epoch"]
        # Local epoch is only meaningful for the in-training ("train") evaluation.
        if dataset_type == "val":
            local_epoch = None
        elif dataset_type == "train":
            local_epoch = self.context["l_epoch"]
        CommonMetrics.save_metric_csv(
            metrics_output=metrics_output,
            output_config=self.common_config.output,
            global_epoch=global_epoch,
            local_epoch=local_epoch,
            dataset_type=dataset_type,
        )
        early_stop_flag = self.context["early_stop_flag"]
        # Early stopping is only evaluated on the val set when checkpointing is
        # on and patience is positive.
        # NOTE(review): `&` is bitwise, not logical `and`; it works here because
        # the operands are plain booleans.
        if (self.common_config.save_frequency > 0) & \
                (dataset_type == "val") & (self.earlystopping.patience > 0):
            early_stop_flag = self.earlystopping(metrics_output, global_epoch)
            if early_stop_flag:
                # find the saved epoch closest to the best epoch
                best_epoch = self.earlystopping.best_epoch
                closest_epoch = round(best_epoch / self.common_config.save_frequency) * \
                    self.common_config.save_frequency
                closest_epoch -= self.common_config.save_frequency \
                    if closest_epoch > global_epoch else 0
                self.context["early_stop_flag"] = True
                self.context["early_stop_epoch"] = closest_epoch
| 7,957 | 37.819512 | 101 | py |
XFL | XFL-master/python/algorithm/framework/horizontal/vgg_jax/label_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from algorithm.core.horizontal.template.jax.fedtype import _get_label_trainer
from common.utils.logger import logger
from .common import Common
from jax import jit, value_and_grad
class HorizontalVggJaxLabelTrainer(Common, _get_label_trainer()):
    """Label trainer for horizontal federated VGG implemented with JAX."""

    def __init__(self, train_conf: dict):
        _get_label_trainer().__init__(self, train_conf)
        self._set_jit_train_step()
        self._set_jit_val_step()

    def _set_jit_train_step(self):
        """Build and JIT-compile the single-batch training step."""
        def train_step(batch, state):
            # Differentiate w.r.t. the parameters only; has_aux=True means
            # calculate_loss appears to return (loss, (aux, new_model_state)) —
            # TODO confirm against calculate_loss in the common base.
            loss_fn = lambda params: self.calculate_loss(params, state.batch_stats, batch, train=True)
            ret, grads = value_and_grad(loss_fn, has_aux=True)(state.params)
            # Unpack the loss and the updated batch statistics; the middle aux
            # value is discarded.
            loss, _, new_model_state = ret[0], *ret[1]
            state = state.apply_gradients(grads=grads, batch_stats=new_model_state['batch_stats'])
            return loss, state
        self.jit_train_step = jit(train_step)

    def train_loop(self):
        """Run one local epoch and record the mean batch loss in the context."""
        train_loss = 0
        for batch_id, batch in enumerate(self.train_dataloader):
            loss, self.state = self.jit_train_step(batch, self.state)
            train_loss += loss.item()
        train_loss /= len(self.train_dataloader)
        self.context["train_loss"] = train_loss
        logger.info(f"Train loss: {train_loss}")
| 1,891 | 40.130435 | 102 | py |
XFL | XFL-master/python/algorithm/framework/horizontal/vgg_jax/assist_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from algorithm.core.horizontal.template.jax.fedtype import _get_assist_trainer
from functools import partial
from .common import Common
class HorizontalVggJaxAssistTrainer(Common, _get_assist_trainer()):
    """Assist-side (aggregator) trainer for the horizontal VGG (JAX) algorithm.

    Registers validation and model-saving hooks after each local epoch;
    performs no local training itself.
    """

    def __init__(self, train_conf: dict):
        _get_assist_trainer().__init__(self, train_conf)
        self._set_jit_val_step()
        # After every local epoch: validate first (rank 2), then save (rank 3).
        for rank, func, desc in (
            (2, partial(self.val_loop, "val"), "validation on valset"),
            (3, self._save_model, "save model"),
        ):
            self.register_hook(place="after_local_epoch", rank=rank,
                               func=func, desc=desc)

    def train_loop(self):
        # Aggregation-only node: nothing to train locally.
        pass
| 1,286 | 38 | 91 | py |
XFL | XFL-master/python/algorithm/framework/horizontal/vgg_jax/common.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
from jax import jit, random
import jax.numpy as jnp
import flax.linen as nn
import torch
from torch.utils.data import DataLoader, TensorDataset
import torchvision.transforms as transforms
from PIL import Image
from flax.core.frozen_dict import FrozenDict
from collections import OrderedDict
from algorithm.core.data_io import NpzReader
from algorithm.model.vgg_jax import vggjax
from common.utils.logger import logger
class Common():
    """Shared logic for the horizontal VGG (JAX) trainers.

    Provides model construction, conversion between the flax train state and
    the flat numpy ``state_dict`` used for federated aggregation, dataloader
    construction, the shared loss computation, and the evaluation loop.
    """

    def _set_model(self) -> nn.Module:
        """Build the VGG flax model and initialize the aggregation buffers.

        Side effects: sets ``self.init_params`` / ``self.init_batch_stats``
        from the model's initial variables, and builds ``self.state_dict``
        (flat name -> float32 numpy array) plus ``self.keys_dict`` (per-module
        leaf names, needed to unflatten again in ``state_dict_to_state``).
        """
        model = None
        self.init_params = None
        self.init_batch_stats = None
        self.state = None
        # Use whichever example batch exists on this party
        # (label trainer vs assist trainer set different attributes).
        exmp_features = self.exmp_label if self.exmp_label is not None else self.exmp_assist
        model_config = self.model_info.get("config")
        model = vggjax(num_classes=model_config["num_classes"], layers=model_config["layers"])
        # Fixed PRNG seed (0) so initialization is reproducible.
        init_rng = random.PRNGKey(0)
        variables = model.init(init_rng, exmp_features, train=True)
        self.init_params, self.init_batch_stats = variables["params"], variables["batch_stats"]
        # init the state_dict and keys_dict used for aggregation:
        # flat key is module name + leaf name concatenated (i+k).
        self.state_dict = OrderedDict()
        self.keys_dict = OrderedDict()
        for key in ["params", "batch_stats"]:
            self.keys_dict[key] = OrderedDict()
            for i, j in variables[key].unfreeze().items():
                self.keys_dict[key][i] = []
                for k, v in j.items():
                    self.keys_dict[key][i].append(k)
                    self.state_dict[i+k] = np.asarray(v, dtype=np.float32)
        return model

    def state_to_state_dict(self):
        """Copy the current train-state params and batch stats into the flat
        numpy ``state_dict`` (the representation sent for aggregation)."""
        for i, j in self.state.params.unfreeze().items():
            for k, v in j.items():
                self.state_dict[i+k] = np.asarray(v, dtype=np.float32)
        for i, j in self.state.batch_stats.unfreeze().items():
            for k, v in j.items():
                self.state_dict[i+k] = np.asarray(v, dtype=np.float32)

    def state_dict_to_state(self):
        """Rebuild the frozen param / batch-stats trees from the flat
        ``state_dict`` (using ``keys_dict`` for structure) and write them
        back into ``self.state``."""
        new_state = dict()
        for key in ["params", "batch_stats"]:
            new_state[key] = dict()
            for i, j in self.keys_dict[key].items():
                value_dict = dict()
                for k in j:
                    value_dict[k] = jnp.asarray(self.state_dict[i+k], dtype=np.float32)
                new_state[key][i] = value_dict
        new_state = FrozenDict(new_state)
        self.state = self.state.replace(params=new_state["params"], batch_stats=new_state["batch_stats"])

    def _read_data(self, input_dataset):
        """Return an ``NpzReader`` for the first dataset config entry, or
        ``None`` when the list is empty or the type is not 'npz'."""
        if len(input_dataset) == 0:
            return None
        conf = input_dataset[0]
        if conf["type"] == "npz":
            path = os.path.join(conf['path'], conf['name'])
            return NpzReader(path)
        else:
            return None

    def _set_train_dataloader(self):
        """Build the train dataloader and an example-features batch.

        Returns:
            (train_dataloader or None, exmp_features or None) — the example
            batch is used by ``_set_model`` for flax initialization.
        """
        def img_collate_fn(batch):
            # Converts torch tensors -> PIL -> augmented numpy -> jnp arrays.
            labels = []
            imgs = []
            transform_train = transforms.Compose([
                transforms.RandomCrop(32, padding=4),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                # Presumably the standard CIFAR-10 channel mean/std — TODO confirm.
                transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
                np.array
            ])
            for feature, label in batch:
                img = Image.fromarray(feature.numpy().astype(np.uint8))
                img = transform_train(img)
                img = np.transpose(img, (1, 2, 0))
                imgs.append(img) # [N, C, H, W] -> [N, H, W, C]
                labels.append(label.numpy())
            return jnp.stack(imgs, 0).astype(jnp.float32), jnp.stack(labels, 0).astype(jnp.int32)
        train_data = self._read_data(self.input_trainset)
        exmp_features = None
        trainset = None
        train_dataloader = None
        if train_data:
            # NOTE(review): only the first 100 samples are used — looks like a
            # debugging/demo cap; confirm before production use.
            trainset = TensorDataset(torch.tensor(train_data.features()[0:100]), torch.tensor(train_data.label()[0:100]))
            exmp_features = jnp.ones_like(jnp.stack(train_data.features()[0:2], 0))
        batch_size = self.train_params.get("batch_size", 64)
        if trainset:
            train_dataloader = DataLoader(trainset, batch_size, shuffle=True, collate_fn=img_collate_fn)
        return train_dataloader, exmp_features

    def _set_val_dataloader(self):
        """Build the validation dataloader and an example-features batch.

        Same structure as ``_set_train_dataloader`` but without augmentation.
        """
        def img_collate_fn(batch):
            labels = []
            imgs = []
            transform_test = transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
                np.array
            ])
            for feature, label in batch:
                img = Image.fromarray(feature.numpy().astype(np.uint8))
                img = transform_test(img)
                img = np.transpose(img, (1, 2, 0))
                imgs.append(img) # [N, C, H, W] -> [N, H, W, C]
                labels.append(label.numpy())
            return jnp.stack(imgs, 0).astype(jnp.float32), jnp.stack(labels, 0).astype(jnp.int32)
        val_data = self._read_data(self.input_valset)
        exmp_features = None
        valset = None
        val_dataloader = None
        if val_data:
            # NOTE(review): capped at 100 samples, same as the trainset.
            valset = TensorDataset(torch.tensor(val_data.features()[0:100]), torch.tensor(val_data.label()[0:100]))
            exmp_features = jnp.ones_like(jnp.stack(val_data.features()[0:2], 0))
        batch_size = self.train_params.get("batch_size", 64)
        if valset:
            val_dataloader = DataLoader(valset, batch_size, shuffle=True, collate_fn=img_collate_fn)
        return val_dataloader, exmp_features

    def calculate_loss(self, params, batch_stats, batch, train):
        """Apply the model and compute mean loss and argmax predictions.

        Returns:
            (loss, (preds, new_model_state)) — ``new_model_state`` holds the
            mutated batch stats when ``train`` is True, otherwise None.
        """
        features, labels = batch
        # Run model. During training, we need to update the BatchNorm statistics.
        outputs = self.model.apply(
            {'params': params, 'batch_stats': batch_stats},
            features,
            train=train,
            mutable=['batch_stats'] if train else False
        )
        logits, new_model_state = outputs if train else (outputs, None)
        loss = self.loss_func(logits, labels).mean()
        preds = logits.argmax(axis=-1)
        return loss, (preds, new_model_state)

    def _set_jit_val_step(self):
        """Build and cache the jit-compiled (no-gradient) evaluation step."""
        def val_step(batch, state):
            loss, (preds, _) = self.calculate_loss(state.params, state.batch_stats, batch, train=False)
            return loss, preds
        self.jit_val_step = jit(val_step)

    def val_loop(self, dataset_type: str = "validation", context: dict = {}):
        # NOTE(review): mutable default ``context={}`` is shared across calls;
        # harmless here since it is never mutated, but worth cleaning up.
        """Evaluate on the selected dataset and log loss plus configured metrics.

        Raises:
            ValueError: if ``dataset_type`` is not one of
                "validation"/"val"/"train".
        """
        val_loss = 0
        val_predicts = []
        labels = []
        metric_output = {}
        if dataset_type in ["validation", "val"]:
            dataloader = self.val_dataloader
        elif dataset_type == "train":
            dataloader = self.train_dataloader
        else:
            raise ValueError(f"dataset type {dataset_type} is not valid.")
        for batch_id, (feature, label) in enumerate(dataloader):
            loss, preds = self.jit_val_step((feature, label), self.state)
            val_predicts.append(preds)
            val_loss += loss.item()
            labels.append(label)
        val_loss /= len(dataloader)
        metric_output[self.loss_func_name] = val_loss
        val_predicts = jnp.concatenate(val_predicts, axis=0)
        labels = jnp.concatenate(labels, axis=0)
        metrics_conf: dict = self.train_params["metric_config"]
        for method in self.metrics:
            metric_output[method] = self.metrics[method](labels, val_predicts, **metrics_conf[method])
        logger.info(f"Metrics on {dataset_type} set: {metric_output}")
| 8,308 | 39.531707 | 121 | py |
XFL | XFL-master/python/algorithm/framework/horizontal/linear_regression/label_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from algorithm.core.horizontal.template.agg_type import register_agg_type_for_label_trainer
from common.utils.logger import logger
from .common import Common
from functools import partial
class HorizontalLinearRegressionLabelTrainer(Common):
    """Label-side trainer for horizontal (federated) linear regression."""

    def __init__(self, train_conf: dict):
        super().__init__(train_conf)
        # The configured aggregation method is the single key under "method".
        agg_type = next(iter(self.common_config.aggregation["method"]))
        self.register_hook(
            place="after_train_loop", rank=1,
            func=partial(self.val_loop, "train"), desc="validation on trainset"
        )
        register_agg_type_for_label_trainer(self, 'torch', agg_type)

    def train_loop(self):
        """Train one local epoch; store the mean loss in the shared context."""
        self.model.train()
        criterion = next(iter(self.lossfunc.values()))
        optimizer = next(iter(self.optimizer.values()))
        # At most one scheduler is configured; None when the dict is empty.
        scheduler = next(iter(self.lr_scheduler.values()), None)
        running_loss = 0.0
        for feature, label in self.train_dataloader:
            prediction = self.model(feature)
            batch_loss = criterion(prediction, label)
            optimizer.zero_grad()
            batch_loss.backward()
            optimizer.step()
            running_loss += batch_loss.item()
        running_loss /= len(self.train_dataloader)
        if scheduler:
            # Step the LR schedule once per local epoch, not per batch.
            scheduler.step()
        self.context["train_loss"] = running_loss
        logger.info(f"Train loss: {running_loss}")
| 2,036 | 36.722222 | 98 | py |
XFL | XFL-master/python/algorithm/framework/horizontal/linear_regression/assist_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from .common import Common
from algorithm.core.horizontal.template.agg_type import register_agg_type_for_assist_trainer
class HorizontalLinearRegressionAssistTrainer(Common):
    """Assist-side (aggregator) trainer for horizontal linear regression.

    Only aggregates, validates and saves; performs no local training.
    """

    def __init__(self, train_conf: dict):
        super().__init__(train_conf)
        # The configured aggregation method is the single key under "method".
        agg_type = next(iter(self.common_config.aggregation["method"]))
        register_agg_type_for_assist_trainer(self, 'torch', agg_type)
        for place, rank, func, desc in (
            ("after_local_epoch", 1, partial(self._save_model, False), "save model "),
            ("after_local_epoch", 2, partial(self.val_loop, "val"), "validation on valset"),
            ("after_global_epoch", 1, partial(self._save_model, True), "save final model"),
        ):
            self.register_hook(place=place, rank=rank, func=func, desc=desc)

    def train_loop(self):
        # Aggregation-only node: nothing to train locally.
        pass
| 1,538 | 42.971429 | 92 | py |
XFL | XFL-master/python/algorithm/framework/horizontal/linear_regression/common.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset
from algorithm.core.data_io import CsvReader
from algorithm.model.linear_regression import LinearRegression
from common.utils.logger import logger
from algorithm.core.horizontal.template.torch.base import BaseTrainer
from common.utils.config_sync import ConfigSynchronizer
from common.checker.x_types import All
from common.evaluation.metrics import CommonMetrics
class Common(BaseTrainer):
    """Shared logic for the horizontal linear-regression trainers: config
    synchronization, model/dataloader construction, and evaluation with
    metric logging and early stopping."""

    def __init__(self, train_conf: dict):
        # Sections of the config that must be identical on every party; they
        # are synchronized before the base trainer consumes the config.
        sync_rule = {
            "model_info": {
                "config": All()
            },
            "train_info": {
                "interaction_params": All(),
                "train_params": {
                    "global_epoch": All(),
                    "aggregation": All(),
                    "encryption": All(),
                    "optimizer": All(),
                    "lr_scheduler": All(),
                    "lossfunc": All(),
                    "metric": All(),
                    "early_stopping": All()
                }
            }
        }
        train_conf = ConfigSynchronizer(train_conf).sync(sync_rule)
        super().__init__(train_conf)

    def _set_model(self) -> nn.Module:
        """Build the linear-regression model from the synchronized config."""
        model_config = self.common_config.model_info.get("config")
        model = LinearRegression(input_dim=model_config["input_dim"],
                                 bias=model_config["bias"])
        return model

    def _read_data(self, input_dataset):
        """Return a ``CsvReader`` for the first dataset config entry, or
        ``None`` when the list is empty or the type is not 'csv'."""
        if len(input_dataset) == 0:
            return None
        conf = input_dataset[0]
        if conf["type"] == "csv":
            path = os.path.join(conf['path'], conf['name'])
            has_label = conf["has_label"]
            has_id = conf['has_id']
            return CsvReader(path, has_id, has_label)
        else:
            return None

    def _set_train_dataloader(self):
        """Build the train ``DataLoader``; labels gain a trailing dim so they
        match the model's ``(N, 1)`` output shape. Returns None without data."""
        train_data = self._read_data(self.common_config.input_trainset)
        trainset = None
        train_dataloader = None
        if train_data:
            trainset = TensorDataset(
                torch.tensor(train_data.features(), dtype=torch.float32).to(self.device),
                torch.tensor(train_data.label(), dtype=torch.float32).unsqueeze(dim=-1).to(self.device)
            )
        batch_size = self.common_config.train_params.get("train_batch_size")
        if trainset:
            train_dataloader = DataLoader(trainset, batch_size, shuffle=True)
        return train_dataloader

    def _set_val_dataloader(self):
        """Build the validation ``DataLoader``; same shaping as the trainset.
        Returns None when no valset is configured."""
        val_data = self._read_data(self.common_config.input_valset)
        valset = None
        val_dataloader = None
        if val_data:
            valset = TensorDataset(
                torch.tensor(val_data.features(), dtype=torch.float32).to(self.device),
                torch.tensor(val_data.label(), dtype=torch.float32).unsqueeze(dim=-1).to(self.device)
            )
        batch_size = self.common_config.train_params.get("val_batch_size")
        if valset:
            val_dataloader = DataLoader(valset, batch_size, shuffle=True)
        return val_dataloader

    def val_loop(self, dataset_type: str = "val", context: dict = {}):
        # NOTE(review): mutable default ``context={}`` — never mutated here,
        # but a known Python pitfall worth cleaning up.
        """Evaluate on the chosen dataset, persist metrics, and drive early
        stopping (on the valset only).

        Raises:
            ValueError: if ``dataset_type`` is not "val" or "train".
        """
        self.model.eval()
        val_loss = 0
        val_predicts = []
        labels = []
        lossfunc_name = list(self.lossfunc.keys())[0]
        lossfunc = list(self.lossfunc.values())[0]
        if dataset_type == "val":
            dataloader = self.val_dataloader
        elif dataset_type == "train":
            dataloader = self.train_dataloader
        else:
            raise ValueError(f"dataset type {dataset_type} is not valid.")
        for batch, (feature, label) in enumerate(dataloader):
            pred = self.model(feature)
            loss = lossfunc(pred, label)
            # squeeze(-1): back from (N, 1) model shape to flat (N,) arrays.
            val_predicts.append(pred.detach().cpu().squeeze(-1).numpy())
            val_loss += loss.item()
            labels.append(label.cpu().squeeze(-1).numpy())
        val_loss /= len(dataloader)
        labels: np.ndarray = np.concatenate(labels, axis=0)
        val_predicts: np.ndarray = np.concatenate(val_predicts, axis=0)
        metrics_output = CommonMetrics._calc_metrics(
            metrics=self.metrics,
            labels=labels,
            val_predicts=val_predicts,
            lossfunc_name=lossfunc_name,
            loss=val_loss,
            dataset_type=dataset_type
        )
        global_epoch = self.context["g_epoch"]
        if dataset_type == "val":
            local_epoch = None
        elif dataset_type == "train":
            local_epoch = self.context["l_epoch"]
        CommonMetrics.save_metric_csv(
            metrics_output=metrics_output,
            output_config=self.common_config.output,
            global_epoch=global_epoch,
            local_epoch=local_epoch,
            dataset_type=dataset_type,
        )
        # Early stopping is only (re)evaluated for valset runs when both
        # checkpointing and a patience are configured; otherwise the flag
        # already present in the context is honored as-is.
        early_stop_flag = self.context["early_stop_flag"]
        if (self.common_config.save_frequency > 0) & \
                (dataset_type == "val") & (self.earlystopping.patience > 0):
            early_stop_flag = self.earlystopping(metrics_output, global_epoch)
        if early_stop_flag:
            # find the saved epoch closest to the best epoch
            # (checkpoints exist only every ``save_frequency`` epochs).
            best_epoch = self.earlystopping.best_epoch
            closest_epoch = round(best_epoch / self.common_config.save_frequency) * \
                self.common_config.save_frequency
            # NOTE(review): stepping back when closest_epoch > global_epoch
            # guards against selecting a not-yet-saved epoch — confirm the
            # comparison target (global_epoch vs. best_epoch) is intended.
            closest_epoch -= self.common_config.save_frequency \
                if closest_epoch > global_epoch else 0
            self.context["early_stop_flag"] = True
            self.context["early_stop_epoch"] = closest_epoch
| 6,316 | 35.94152 | 103 | py |
XFL | XFL-master/python/algorithm/framework/horizontal/kmeans/label_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch import nn
from sklearn.cluster import KMeans
from algorithm.core.horizontal.template.agg_type import register_agg_type_for_label_trainer
from common.utils.logger import logger
from .common import Common
from functools import partial
class HorizontalKmeansLabelTrainer(Common):
    """Label-side trainer for horizontal (federated) k-means.

    Each round it refines the aggregated centroids on the local trainset
    with a few Lloyd iterations and writes the refined centroids back into
    the torch model so they are picked up by the next aggregation.
    """

    def __init__(self, train_conf: dict):
        super().__init__(train_conf)
        self.register_hook(
            place="after_train_loop", rank=1,
            func=partial(self.val_loop, "train"), desc="validation on trainset"
        )
        register_agg_type_for_label_trainer(self, "torch", "fedavg")

    def train_loop(self):
        """Run local k-means starting from the current (aggregated) centroids."""
        # Load centroids from the aggregated torch model.
        centroids = self.model.state_dict()['centroids'].numpy()
        # Single init (the given centroids), a few refinement iterations.
        kmeans_model = KMeans(
            n_clusters=centroids.shape[0],
            init=centroids,
            n_init=1,
            max_iter=10
        )
        train_features, _ = self.train_dataloader.dataset.tensors
        train_features = train_features.numpy()
        kmeans_model.fit(train_features)
        logger.info(f"K-Means score: {kmeans_model.score(train_features)}")
        # Write the refined centroids back into the model.  (The original
        # code also built a modified state_dict copy here but never applied
        # it — that dead round-trip has been removed; assigning the
        # parameter directly is what actually updates the model.)
        self.model.centroids = nn.Parameter(
            torch.tensor(kmeans_model.cluster_centers_))
| 2,144 | 35.355932 | 91 | py |
XFL | XFL-master/python/algorithm/framework/horizontal/kmeans/assist_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from common.utils.logger import logger
from .common import Common
from algorithm.core.horizontal.template.agg_type import register_agg_type_for_assist_trainer
class HorizontalKmeansAssistTrainer(Common):
    """Assist-side (aggregator) trainer for horizontal k-means.

    Only aggregates, validates and saves; performs no local training.
    """

    def __init__(self, train_conf: dict):
        super().__init__(train_conf)
        register_agg_type_for_assist_trainer(self, 'torch', "fedavg")
        for place, rank, func, desc in (
            ("after_local_epoch", 1, partial(self._save_model, False), "save model "),
            ("after_local_epoch", 2, partial(self.val_loop, "val"), "validation on valset"),
            ("after_global_epoch", 1, partial(self._save_model, True), "save final model"),
        ):
            self.register_hook(place=place, rank=rank, func=func, desc=desc)

    def train_loop(self):
        # Aggregation-only node: nothing to train locally.
        pass
| 1,491 | 41.628571 | 92 | py |
XFL | XFL-master/python/algorithm/framework/horizontal/kmeans/common.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import torch
from torch import nn
from torch.utils.data.dataset import TensorDataset
from torch.utils.data.dataloader import DataLoader
from sklearn.metrics import davies_bouldin_score
from sklearn.cluster import KMeans
from algorithm.core.data_io import CsvReader
from algorithm.model.horizontal_k_means import HorizontalKMeans
from common.utils.logger import logger
from algorithm.core.horizontal.template.torch.base import BaseTrainer
from common.utils.config_sync import ConfigSynchronizer
from common.checker.x_types import All
from common.evaluation.metrics import CommonMetrics
class Common(BaseTrainer):
    """Shared logic for the horizontal k-means trainers: config sync,
    model and dataloader construction, and cluster evaluation."""

    def __init__(self, train_conf: dict) -> None:
        # Sections of the config that must be identical on every party.
        sync_rule = {
            "model_info": All(),
            "train_info": {
                "interaction_params": All(),
                "train_params": {
                    "global_epoch": All(),
                    "aggregation": All(),
                    "encryption": All(),
                }
            }
        }
        train_conf = ConfigSynchronizer(train_conf).sync(sync_rule)
        super().__init__(train_conf)

    def _set_model(self) -> nn.Module:
        """Build the k-means model (a torch module holding the centroids)."""
        model_config = self.common_config.model_info.get("config")
        input_dim = model_config["input_dim"]
        num_clusters = model_config["num_clusters"]
        model = HorizontalKMeans(
            input_dim=input_dim, num_clusters=num_clusters)
        return model

    def _read_data(self, input_dataset):
        """Return a ``CsvReader`` for the first dataset config entry, or
        ``None`` when the list is empty or the type is not 'csv'."""
        if len(input_dataset) == 0:
            return None
        conf = input_dataset[0]
        if conf["type"] == "csv":
            path = os.path.join(conf['path'], conf['name'])
            logger.info(f"Data path: {os.path.abspath(path)}")
            has_label = conf["has_label"]
            has_id = conf['has_id']
            return CsvReader(path, has_id, has_label)
        else:
            return None

    def _set_train_dataloader(self):
        """Build the train ``DataLoader`` (default batching), or None."""
        train_data = self._read_data(self.common_config.input_trainset)
        train_dataloader = None
        if train_data:
            train_dataset = TensorDataset(
                torch.Tensor(train_data.features()),
                torch.Tensor(train_data.label())
            )
            train_dataloader = DataLoader(train_dataset)
        return train_dataloader

    def _set_val_dataloader(self):
        """Build the validation ``DataLoader`` (default batching), or None."""
        val_data = self._read_data(self.common_config.input_valset)
        val_dataloader = None
        if val_data:
            val_dataset = TensorDataset(
                torch.Tensor(val_data.features()),
                torch.Tensor(val_data.label())
            )
            val_dataloader = DataLoader(val_dataset)
        return val_dataloader

    def val_loop(self, dataset_type: str = "val", context: dict = {}):
        """Assign points to the model's centroids and record the
        Davies-Bouldin score as the evaluation metric.

        Raises:
            ValueError: if ``dataset_type`` is not "val" or "train".
        """
        if dataset_type == "val":
            dataloader = self.val_dataloader
        elif dataset_type == "train":
            dataloader = self.train_dataloader
        else:
            raise ValueError(f"dataset type {dataset_type} is not valid.")
        val_features, val_label = dataloader.dataset.tensors
        val_features = val_features.numpy()
        centroids = self.model.state_dict()['centroids'].numpy()
        # sklearn requires fit() before predict(); run one capped iteration,
        # then force the model's aggregated centroids back in below.
        kmeans = KMeans(
            n_clusters=centroids.shape[0], init=centroids, n_init=1, max_iter=1)
        kmeans.fit(val_features)
        # BUGFIX: predictions read ``cluster_centers_`` (trailing underscore);
        # the original assigned ``cluster_centers``, a silent no-op, so points
        # were labeled against the locally refitted centroids rather than the
        # aggregated model centroids being evaluated.
        kmeans.cluster_centers_ = centroids
        pred_labels = kmeans.predict(val_features)
        score = davies_bouldin_score(val_features, pred_labels)
        metrics_output = CommonMetrics._calc_metrics(
            metrics={},
            labels=val_label,
            val_predicts=pred_labels,
            lossfunc_name="davies_bouldin_score",
            loss=score,
            dataset_type=dataset_type
        )
        global_epoch = self.context["g_epoch"]
        if dataset_type == "val":
            local_epoch = None
        elif dataset_type == "train":
            local_epoch = self.context["l_epoch"]
        CommonMetrics.save_metric_csv(
            metrics_output=metrics_output,
            output_config=self.common_config.output,
            global_epoch=global_epoch,
            local_epoch=local_epoch,
            dataset_type=dataset_type,
        )
| 4,892 | 34.456522 | 80 | py |
XFL | XFL-master/python/algorithm/framework/horizontal/nbafl/label_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from common.communication.gRPC.python.channel import DualChannel
from service.fed_config import FedConfig
from functools import partial
from algorithm.core.horizontal.template.agg_type import \
register_agg_type_for_label_trainer
from common.utils.logger import logger
from .common import Common
class HorizontalNbaflLabelTrainer(Common):
    """Label-side trainer with differential-privacy noising (NbAFL-style:
    parameters are clipped and Gaussian-noised before uplink aggregation;
    presumably follows the NbAFL scheme — confirm against the reference).
    """

    def __init__(self, train_conf: dict):
        super().__init__(train_conf)
        register_agg_type_for_label_trainer(self, "torch", "fedavg")
        # Channel to report the local trainset size to the assist trainer.
        self.sample_size_channel = DualChannel(
            name="sample_size_" + FedConfig.node_id,
            ids=[FedConfig.get_assist_trainer(), FedConfig.node_id]
        )
        # initialize prev params (snapshot used for the proximal term).
        self.prev_params = [
            param.data.detach().clone() for param in self.model.parameters()
        ]
        # Update sample size
        self.register_hook(
            place="before_global_epoch", rank=1,
            func=self._update_sample_size, desc="Update local sample size"
        )
        # Calculate update sigma
        self.register_hook(
            place="before_global_epoch", rank=2,
            func=self._calc_uplink_sigma, desc="Calculate uplink sigma"
        )
        # Update prev param
        # NOTE(review): train_loop also refreshes prev_params at its end,
        # so this hook appears redundant — confirm the intended ordering.
        self.register_hook(
            place="after_local_epoch", rank=1,
            func=self._update_prev_param, desc="Update prev param"
        )
        # Clip norm
        self.register_hook(
            place="after_local_epoch", rank=2,
            func=self._clip_params, desc="Clip param norms"
        )
        # Add noise
        self.register_hook(
            place="after_local_epoch", rank=3,
            func=self._add_noise, desc="Add uplink noise"
        )
        # Validation
        self.register_hook(
            place="after_train_loop", rank=1,
            func=partial(self.val_loop, "train"), desc="validation on trainset"
        )

    def _update_prev_param(self, context):
        """Snapshot the current parameters as the next proximal anchor."""
        self.prev_params = [
            param.data.detach().clone() for param in self.model.parameters()
        ]

    def _cal_regularization(self, p=2):
        """Proximal regularizer: (mu/p) * sum ||w - w_prev||_p^p over all
        parameter tensors, anchored at ``self.prev_params``."""
        reg = 0.0
        for w_prev, w in zip(self.prev_params, self.model.parameters()):
            reg += torch.pow(torch.norm(w - w_prev, p), p)
        return self.mu * reg / p

    def _clip_params(self, context):
        """Clip parameters before noising.

        NOTE(review): despite the hook description "Clip param norms", this
        clips each coordinate's absolute value at C (element-wise), not the
        tensor norm — confirm this matches the intended sensitivity bound.
        """
        for param in self.model.parameters():
            norm_ratio = torch.maximum(
                torch.ones(param.shape),
                torch.abs(param.data) / self.common_config.train_params['C']
            )
            param.data = param.data / norm_ratio
        return

    def _calc_uplink_sigma(self, context):
        """Compute the uplink Gaussian noise scale.

        Sensitivity delta_S_u = 2C / |local trainset|; sigma_u scales it by
        c / epsilon, with c = sqrt(2 ln(1.25/delta)) set in Common.__init__.
        """
        delta_S_u = 2 * self.common_config.train_params['C'] / \
            len(self.train_dataloader.dataset)
        sigma_u = self.c * delta_S_u / self.epsilon
        logger.info("Uplink sigma: {}".format(sigma_u))
        self.sigma_u = sigma_u
        return

    def train_loop(self):
        """One local epoch of loss + proximal-regularizer training.

        Returns:
            The mean (regularized) batch loss over the epoch.
        """
        self.model.train()
        train_loss = 0
        lossfunc = list(self.lossfunc.values())[0]
        optimizer = list(self.optimizer.values())[0]
        for batch_idx, (feature, label) in enumerate(self.train_dataloader):
            pred = self.model(feature)
            loss = lossfunc(pred, label)
            reg = self._cal_regularization()
            loss += reg
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            train_loss += loss.item()
        train_loss /= len(self.train_dataloader)
        # retain current params
        self.prev_params = [
            param.data.detach().clone() for param in self.model.parameters()
        ]
        return train_loss

    def _add_noise(self, context):
        """Add iid Gaussian noise (scale sigma_u) to every parameter tensor
        before the update is sent for aggregation."""
        for param in self.model.parameters():
            param.data += torch.distributions.Normal(
                loc=0, scale=self.sigma_u).sample(param.size()).to(self.device)
        return

    def _update_sample_size(self, context):
        """Send the local trainset size to the assist trainer (used there to
        compute the downlink noise scale)."""
        logger.info("trainset length: {}".format(
            len(self.train_dataloader.dataset)))
        self.sample_size_channel.send(len(self.train_dataloader.dataset))
        return
| 4,761 | 33.014286 | 79 | py |
XFL | XFL-master/python/algorithm/framework/horizontal/nbafl/assist_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from common.communication.gRPC.python.channel import DualChannel
from service.fed_config import FedConfig
from functools import partial
from common.utils.logger import logger
from .common import Common
from algorithm.core.horizontal.template.agg_type import \
register_agg_type_for_assist_trainer
class HorizontalNbaflAssistTrainer(Common):
    """Assist-side (aggregator) trainer for the DP (NbAFL-style) algorithm.

    Collects per-party sample sizes, derives a downlink noise scale, and
    noises the aggregated model before broadcasting. No local training.
    """

    def __init__(self, train_conf: dict):
        super().__init__(train_conf)
        register_agg_type_for_assist_trainer(self, "torch", "fedavg")
        self.load_model()
        self.sample_size_channel = {}
        # Init size channel (one DualChannel per label trainer).
        for party_id in FedConfig.get_label_trainer():
            self.sample_size_channel[party_id] = DualChannel(
                name="sample_size_" + party_id,
                ids=[FedConfig.node_id, party_id]
            )
        # Get sample size
        self.register_hook(
            place="before_global_epoch", rank=1,
            func=self._get_sample_size, desc="Get sample size"
        )
        # Calculate downlink noise
        self.register_hook(
            place="before_global_epoch", rank=2,
            func=self._calc_downlink_sigma, desc="Calculate downlink noise"
        )
        # Add noise
        self.register_hook(
            place="after_local_epoch", rank=1,
            func=self._add_noise, desc="Add downlink noise"
        )
        # Validation
        self.register_hook(
            place="after_local_epoch", rank=2,
            func=partial(self.val_loop, "val"), desc="validation on valset"
        )
        self.register_hook(
            place="after_global_epoch", rank=1,
            func=partial(self._save_model, True), desc="save final model"
        )

    def _calc_downlink_sigma(self, context):
        """Compute the downlink Gaussian noise scale sigma_d.

        Noise is only needed when global_epoch T exceeds L*sqrt(L) for
        L = num_client; otherwise sigma_d is 0.  The formula uses the
        minimum party sample size gathered in ``_get_sample_size``.
        NOTE(review): presumably Eq. for sigma_d from the NbAFL paper —
        verify the T^2 - L^3 term against the reference.
        """
        logger.info("Calculating downlink sigma")
        if self.common_config.train_params['global_epoch'] > \
                self.common_config.train_params['num_client'] * \
                np.sqrt(self.common_config.train_params['num_client']):
            sigma_d = (
                2 * self.common_config.train_params['C'] * self.c * np.sqrt(
                    self.common_config.train_params['global_epoch'] ** 2 - \
                    np.power(self.common_config.train_params['num_client'], 3)) / \
                (self.min_sample_num * \
                 self.common_config.train_params['num_client'] * \
                 self.common_config.train_params['epsilon'])
            )
        else:
            sigma_d = 0.0
        logger.info("Downlink sigma: {}".format(sigma_d))
        self.sigma_d = sigma_d
        return

    def _add_noise(self, context):
        """Add iid Gaussian noise (scale sigma_d) to every aggregated
        parameter tensor; skipped entirely when sigma_d == 0."""
        if self.sigma_d > 0:
            noise_generator = torch.distributions.Normal(
                loc=0, scale=self.sigma_d)
            for param_data in self.model.parameters():
                param_data.data += noise_generator.sample(param_data.size())
        return

    def _get_sample_size(self, context):
        """Receive each label trainer's trainset size and keep the minimum
        (used in the downlink sigma formula)."""
        sample_nums = []
        for party_id in FedConfig.get_label_trainer():
            single_sample_size = self.sample_size_channel[party_id].recv()
            sample_nums.append(single_sample_size)
        sample_num_array = np.array(sample_nums)
        logger.info("Sample num array: {}".format(sample_num_array))
        self.min_sample_num = np.min(sample_num_array)
        return

    def train_loop(self):
        # Aggregation-only node: nothing to train locally.
        pass
| 4,053 | 36.192661 | 87 | py |
XFL | XFL-master/python/algorithm/framework/horizontal/nbafl/common.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset
from algorithm.core.data_io import CsvReader
from algorithm.core.horizontal.template.torch.base import BaseTrainer
from common.checker.x_types import All
from common.evaluation.metrics import CommonMetrics
from common.utils.config_sync import ConfigSynchronizer
from common.utils.logger import logger
class Common(BaseTrainer):
    """Shared NbAFL trainer logic used by all parties.

    Synchronizes the training configuration across parties, caches the
    differential-privacy hyper-parameters (mu, epsilon, delta, C), and builds
    the MLP model plus train/val dataloaders from CSV inputs.
    """
    def __init__(self, train_conf: dict):
        """Synchronize the config across parties and cache DP parameters.

        Args:
            train_conf: raw local training configuration dict.
        """
        # Only the keys listed here are forced to agree on every party.
        sync_rule = {
            "model_info": {
                "config": All()
            },
            "train_info": {
                "interaction_params": All(),
                "train_params": {
                    "global_epoch": All(),
                    "mu": All(),
                    "epsilon": All(),
                    "delta": All(),
                    "C": All(),
                    "aggregation": All(),
                    "encryption": All(),
                    "optimizer": All(),
                    "lossfunc": All(),
                    "metric": All(),
                    "early_stopping": All()
                }
            }
        }
        train_conf = ConfigSynchronizer(train_conf).sync(sync_rule)
        super().__init__(train_conf)
        # Differential-privacy hyper-parameters.
        self.mu = self.common_config.train_params['mu']
        self.delta = self.common_config.train_params['delta']
        # Gaussian-mechanism constant: c = sqrt(2 * ln(1.25 / delta)).
        self.c = np.sqrt(2 * np.log(1.25 / self.delta))
        self.epsilon = self.common_config.train_params['epsilon']
    def _set_model(self):
        """Build the MLP described by model_info.

        Returns:
            nn.Sequential of Linear layers sized input_dim -> layer_dim[...],
            each followed by the activation class named in 'activation'.
        """
        logger.info("Model info: {}".format(self.common_config.model_info))
        model_config = self.common_config.model_info["config"]
        assert len(model_config['layer_dim']) == len(
            model_config['activation']), "Hidden layer nums must match activation nums"
        layer_dims = [model_config['input_dim']] + model_config['layer_dim']
        layer_act = model_config['activation']
        module_list = []
        for input_dim, output_dim, activation_str in zip(layer_dims, layer_dims[1:], layer_act):
            module_list.append(
                nn.Linear(input_dim, output_dim, bias=model_config['bias']))
            # Activation classes are looked up by name on torch.nn.
            activation = getattr(nn, activation_str)()
            module_list.append(activation)
        model = nn.Sequential(*module_list)
        return model
    def load_model(self):
        """Load model weights via the BaseTrainer hook with default options."""
        self._load_model({})
    def _read_data(self, input_dataset):
        """Return a CsvReader for the first dataset config entry, or None.

        Only the "csv" dataset type is supported; empty lists and other
        types yield None.
        """
        if len(input_dataset) == 0:
            return None
        conf = input_dataset[0]
        if conf["type"] == "csv":
            path = os.path.join(conf['path'], conf['name'])
            has_label = conf["has_label"]
            has_id = conf['has_id']
            return CsvReader(path, has_id, has_label)
        else:
            return None
    def _set_train_dataloader(self):
        """Create the shuffled training DataLoader; None when no trainset."""
        train_data = self._read_data(self.common_config.input_trainset)
        trainset = None
        train_dataloader = None
        if train_data:
            # Labels get a trailing unit dimension to match model output shape.
            trainset = TensorDataset(
                torch.tensor(train_data.features(), dtype=torch.float32).to(self.device),
                torch.tensor(train_data.label(), dtype=torch.float32).unsqueeze(dim=-1).to(self.device)
            )
            batch_size = self.common_config.train_params.get("train_batch_size")
        if trainset:
            train_dataloader = DataLoader(trainset, batch_size, shuffle=True)
        return train_dataloader
    def _set_val_dataloader(self):
        """Create the validation DataLoader; None when no valset."""
        val_data = self._read_data(self.common_config.input_valset)
        valset = None
        val_dataloader = None
        if val_data:
            valset = TensorDataset(
                torch.tensor(val_data.features(), dtype=torch.float32).to(self.device),
                torch.tensor(val_data.label(), dtype=torch.float32).unsqueeze(dim=-1).to(self.device)
            )
            batch_size = self.common_config.train_params.get("val_batch_size")
        if valset:
            val_dataloader = DataLoader(valset, batch_size, shuffle=True)
        return val_dataloader
    def val_loop(self, dataset_type: str = "val", context: dict = {}):
        """Evaluate on the chosen dataset, persist metrics, drive early stop.

        Args:
            dataset_type: "val" (default) or "train"; selects the dataloader.
            context: hook context (unused here; kept for the hook signature).
                NOTE(review): mutable default argument — harmless as it is
                never mutated, but worth confirming/cleaning up.

        Raises:
            ValueError: for any other dataset_type.
        """
        self.model.eval()
        val_loss = 0
        val_predicts = []
        labels = []
        lossfunc_name = list(self.lossfunc.keys())[0]
        lossfunc = list(self.lossfunc.values())[0]
        if dataset_type == "val":
            dataloader = self.val_dataloader
        elif dataset_type == "train":
            dataloader = self.train_dataloader
        else:
            raise ValueError(f"dataset type {dataset_type} is not valid.")
        with torch.no_grad():
            for batch_idx, (feature, label) in enumerate(dataloader):
                pred = self.model(feature)
                loss = lossfunc(pred, label)
                val_predicts.append(pred.detach().cpu().squeeze(-1).numpy())
                val_loss += loss.item()
                labels.append(label.cpu().squeeze(-1).numpy())
        val_loss /= len(dataloader)
        labels: np.ndarray = np.concatenate(labels, axis=0)
        val_predicts: np.ndarray = np.concatenate(val_predicts, axis=0)
        # 1-D output: binary decision at threshold 0.5.
        # 2-D output: multi-class decision by argmax over the class axis.
        if len(val_predicts.shape) == 1:
            val_predicts = np.array(val_predicts > 0.5, dtype=np.int32)
        elif len(val_predicts.shape) == 2:
            val_predicts = val_predicts.argmax(axis=-1)
        metrics_output = CommonMetrics._calc_metrics(
            metrics=self.metrics,
            labels=labels,
            val_predicts=val_predicts,
            lossfunc_name=lossfunc_name,
            loss=val_loss,
            dataset_type=dataset_type
        )
        global_epoch = self.context["g_epoch"]
        # dataset_type was validated above, so exactly one branch runs here.
        if dataset_type == "val":
            local_epoch = None
        elif dataset_type == "train":
            local_epoch = self.context["l_epoch"]
        CommonMetrics.save_metric_csv(
            metrics_output=metrics_output,
            output_config=self.common_config.output,
            global_epoch=global_epoch,
            local_epoch=local_epoch,
            dataset_type=dataset_type,
        )
        early_stop_flag = self.context["early_stop_flag"]
        # NOTE(review): bitwise '&' on booleans works but, unlike 'and',
        # does not short-circuit; behavior is the same for these operands.
        if (self.common_config.save_frequency > 0) & \
                (dataset_type == "val") & (self.earlystopping.patience > 0):
            early_stop_flag = self.earlystopping(metrics_output, global_epoch)
        if early_stop_flag:
            # find the saved epoch closest to the best epoch
            best_epoch = self.earlystopping.best_epoch
            closest_epoch = round(best_epoch / self.common_config.save_frequency) * \
                self.common_config.save_frequency
            closest_epoch -= self.common_config.save_frequency \
                if closest_epoch > global_epoch else 0
            self.context["early_stop_flag"] = True
            self.context["early_stop_epoch"] = closest_epoch
| 7,492 | 37.229592 | 103 | py |
XFL | XFL-master/python/algorithm/framework/horizontal/nbafl/__init__.py | 0 | 0 | 0 | py | |
XFL | XFL-master/python/algorithm/framework/horizontal/chatglm/label_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from transformers import Trainer
from service.fed_config import FedConfig
from algorithm.framework.horizontal.chatglm.common import Common
from algorithm.framework.horizontal.chatglm.callback import LabelTrainerCallback
class HorizontalChatglmLabelTrainer(Common):
    """Label-trainer side of horizontal ChatGLM fine-tuning."""
    def __init__(self, train_conf: dict):
        super().__init__(train_conf)
    def fit(self):
        """Run HuggingFace training with the federated-sync callback attached."""
        train_params = self.common_config.train_params
        # Fall back to the trainer's save_steps when no agg_steps is given.
        agg_steps = self.common_config.aggregation.get("agg_steps") or train_params["trainer"]["save_steps"]
        # The peft config dict is expected to hold exactly one entry.
        (peft_type, peft_config_dict), = train_params["peft"].items()
        sync_callback = LabelTrainerCallback(
            agg_steps,
            train_params["encryption"],
            root_id=FedConfig.get_assist_trainer(),
            leaf_ids=FedConfig.get_label_trainer(),
            init_params=not self.load_from_pretrained,
            peft_type=peft_type,
        )
        Trainer(
            model=self.model,
            args=self.training_args,
            train_dataset=self.train_dataset,
            eval_dataset=self.val_dataset,
            tokenizer=self.tokenizer,
            data_collator=self.data_collator,
            callbacks=[sync_callback],
        ).train()
| 2,039 | 40.632653 | 127 | py |
XFL | XFL-master/python/algorithm/framework/horizontal/chatglm/assist_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
from transformers import Trainer
from algorithm.core.horizontal.aggregation.api import get_aggregation_root_inst
from service.fed_config import FedConfig
from common.utils.logger import logger
from algorithm.framework.horizontal.chatglm.common import Common
from algorithm.framework.horizontal.chatglm.callback import (
AssistTrainerCallback, get_adapter_state_dict, set_adapter_state_dict
)
class HorizontalChatglmAssistTrainer(Common):
    """Assist-trainer (aggregation root) side of horizontal ChatGLM training."""
    def __init__(self, train_conf: dict):
        super().__init__(train_conf)
    def fit(self):
        """Either train locally with a sync callback or act as pure aggregator.

        When this party also holds a trainset, it runs a HuggingFace Trainer
        with an AssistTrainerCallback.  Otherwise it only drives the
        aggregation protocol: broadcast initial adapter weights, then
        aggregate/broadcast once per scheduled step, saving a checkpoint
        after each round.
        """
        # Fall back to the trainer's save_steps when no agg_steps is given.
        agg_steps = self.common_config.aggregation.get("agg_steps") or self.common_config.train_params["trainer"]["save_steps"]
        sec_conf = self.common_config.train_params["encryption"]
        # The peft config dict is expected to hold exactly one entry.
        (peft_type, peft_config_dict), = self.common_config.train_params["peft"].items()
        if len(self.common_config.input_trainset) != 0:
            my_callback = AssistTrainerCallback(agg_steps,
                                                sec_conf,
                                                root_id=FedConfig.get_assist_trainer(),
                                                leaf_ids=FedConfig.get_label_trainer(),
                                                init_params=not self.load_from_pretrained,
                                                peft_type=peft_type)
            trainer = Trainer(
                model=self.model,
                args=self.training_args,
                train_dataset=self.train_dataset,
                eval_dataset=self.val_dataset,
                tokenizer=self.tokenizer,
                data_collator=self.data_collator,
                callbacks=[my_callback],
            )
            trainer.train()
        else:
            agg_inst = get_aggregation_root_inst(sec_conf,
                                                 root_id=FedConfig.get_assist_trainer(),
                                                 leaf_ids=FedConfig.get_label_trainer())
            # agg_steps is a fraction of the total run; enumerate sync points
            # (e.g. 0.25 -> [0.25, 0.5, 0.75, 1]); must mirror the leaf side.
            self.agg_steps_list = []
            i = agg_steps
            while i < 1:
                self.agg_steps_list.append(round(i, 4))
                i += agg_steps
            self.agg_steps_list.append(1)
            # Initial broadcast so every leaf starts from the same weights.
            adapters_weights = get_adapter_state_dict(self.model, peft_type)
            agg_inst.broadcast(adapters_weights)
            for i in range(len(self.agg_steps_list)):
                logger.info(f"gather and agg, global_step={self.agg_steps_list[i]}")
                new_adapters_weights = agg_inst.aggregate()
                logger.info(f"broadcast, global_step={self.agg_steps_list[i]}")
                agg_inst.broadcast(new_adapters_weights)
                if self.training_args.output_dir and self.training_args.save_strategy != 'no':
                    save_dir = Path(self.common_config.output["path"]) / f"checkpoint-{str(self.agg_steps_list[i])}"
                    if peft_type == "PREFIX_TUNING":
                        # Prefix tuning: save the aggregated weights directly.
                        self.model.save_pretrained(save_directory=save_dir,
                                                   state_dict=new_adapters_weights)
                    else:
                        # Other PEFT types: load weights into the model first.
                        set_adapter_state_dict(self.model, peft_type, new_adapters_weights)
                        self.model.save_pretrained(save_directory=save_dir)
| 3,982 | 45.313953 | 127 | py |
XFL | XFL-master/python/algorithm/framework/horizontal/chatglm/common.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from pathlib import Path
import torch
from peft import (
get_peft_model, PeftModel, PEFT_TYPE_TO_CONFIG_MAPPING
)
from transformers import (
AutoTokenizer,
AutoModel,
AutoConfig,
DataCollatorForSeq2Seq,
TrainingArguments
)
from algorithm.core.data_io import QADataset
from service.fed_config import FedConfig
from common.utils.config_parser import CommonConfigParser
from common.utils.logger import logger
from common.checker.x_types import All
from common.utils.config_sync import ConfigSynchronizer
def alternateSVDLinear():
    """Monkey-patch peft's AdaLoRA ``SVDLinear.forward``.

    The replacement differs from upstream only in casting the input to
    float before the LoRA dropout/matmul path (see the ``# to float()``
    line) — presumably to avoid a dtype mismatch when the base model runs
    in half precision; TODO confirm against the peft version in use.
    """
    import torch.nn.functional as F
    from peft.tuners.adalora import transpose
    def forward(self, x: torch.Tensor):
        # No adapter registered for the active name: plain linear layer.
        if self.active_adapter not in self.lora_A.keys():
            return F.linear(x, transpose(self.weight, self.fan_in_fan_out), bias=self.bias)
        if self.disable_adapters:
            if self.r[self.active_adapter] > 0 and self.merged:
                self.unmerge()
            result = F.linear(x, transpose(self.weight, self.fan_in_fan_out), bias=self.bias)
        elif self.r[self.active_adapter] > 0 and not self.merged:
            result = F.linear(x, transpose(self.weight, self.fan_in_fan_out), bias=self.bias)
            result += (
                (
                    self.lora_dropout[self.active_adapter](x.float()) # to float()
                    @ (self.lora_A[self.active_adapter] * self.lora_E[self.active_adapter]).T
                    @ self.lora_B[self.active_adapter].T
                )
                * self.scaling[self.active_adapter]
                / (self.ranknum[self.active_adapter] + 1e-5)
            )
        else:
            result = F.linear(x, transpose(self.weight, self.fan_in_fan_out), bias=self.bias)
        return result
    from peft.tuners.adalora import SVDLinear
    SVDLinear.forward = forward
class Common:
    """Shared setup for horizontal ChatGLM fine-tuning (all parties).

    Synchronizes the config, loads tokenizer/model (optionally wrapped with
    a PEFT adapter), builds datasets/collator, and prepares HuggingFace
    TrainingArguments.
    """
    def __init__(self, train_conf: dict):
        """Prepare model, tokenizer, datasets and training arguments.

        Args:
            train_conf: raw local training configuration dict; relative
                input paths are resolved against the repository root.
        """
        # Only parties in a federation (assist trainer present) synchronize;
        # standalone runs keep the local config as-is.
        if FedConfig.get_assist_trainer():
            sync_rule = {
                "train_info": {
                    "train_params": {
                        "aggregation": All(),
                        "encryption": All(),
                        "peft": All(),
                        "trainer": {
                            "learning_rate": All(),
                            "weight_decay": All(),
                            "adam_beta1": All(),
                            "adam_beta2": All(),
                            "adam_epsilon": All(),
                            "max_grad_norm": All(),
                            "max_steps": All(),
                            "num_train_epochs": All(),
                            "seed": All()
                        },
                        "dataset": All()
                    }
                }
            }
            train_conf = ConfigSynchronizer(train_conf).sync(sync_rule)
        # Resolve relative model/dataset paths against the repository root.
        root_path = Path(__file__).parents[4]
        path = train_conf.get('input', {}).get("pretrained_model", {}).get("path")
        if path and not os.path.isabs(path):
            train_conf["input"]["pretrained_model"]['path'] = os.path.abspath(os.path.join(root_path, path))
        path = train_conf.get('input', {}).get("adapter_model", {}).get("path")
        if path and not os.path.isabs(path):
            train_conf["input"]["adapter_model"]['path'] = os.path.abspath(os.path.join(root_path, path))
        trainset_conf = train_conf.get('input', {}).get("trainset")
        if trainset_conf:
            path = trainset_conf[0].get("path")
            if path and not os.path.isabs(path):
                train_conf["input"]["trainset"][0]['path'] = os.path.abspath(os.path.join(root_path, path))
        self.common_config = CommonConfigParser(train_conf)
        # for adalora
        alternateSVDLinear()
        # CPU: hide all GPUs from torch when no_cuda is requested.
        if self.common_config.train_params["trainer"].get("no_cuda"):
            os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
        pretrained_model_conf = self.common_config.input.get("pretrained_model", {})
        path = pretrained_model_conf.get("path")
        model_name_or_path = path
        self.tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True) # Callback
        logger.info(self.tokenizer)
        # A checkpoint directory counts as pretrained when it contains any
        # 'pytorch_model*' file; otherwise the model is built from config.
        self.load_from_pretrained = False
        for name in os.listdir(model_name_or_path):
            if 'pytorch_model' in name:
                self.load_from_pretrained = True
                break
        # The peft config dict is expected to hold exactly one entry.
        (peft_type, peft_config_dict), = self.common_config.train_params["peft"].items()
        if self.load_from_pretrained:
            if peft_type == "PREFIX_TUNING":
                config = AutoConfig.from_pretrained(model_name_or_path, trust_remote_code=True)
                config.pre_seq_len = self.common_config.train_params["peft"][peft_type]["pre_seq_len"]
                config.prefix_projection = self.common_config.train_params["peft"][peft_type]["prefix_projection"]
                model = AutoModel.from_pretrained(model_name_or_path, config=config, trust_remote_code=True)
            else:
                model = AutoModel.from_pretrained(model_name_or_path, trust_remote_code=True, device_map="auto")
        else:
            if peft_type == "PREFIX_TUNING":
                config = AutoConfig.from_pretrained(model_name_or_path, trust_remote_code=True)
                config.pre_seq_len = self.common_config.train_params["peft"][peft_type]["pre_seq_len"]
                config.prefix_projection = self.common_config.train_params["peft"][peft_type]["prefix_projection"]
                model = AutoModel.from_config(config, trust_remote_code=True)
            else: # if peft_type == "LORA":
                config = AutoConfig.from_pretrained(model_name_or_path, trust_remote_code=True)
                model = AutoModel.from_config(config, trust_remote_code=True)
            logger.warning("No pretrained model founded, load from config")
        # fp32 on CPU, fp16 otherwise.
        if self.common_config.train_params["trainer"].get("no_cuda"):
            model = model.float()
        else:
            model = model.half()
        if peft_type == "PREFIX_TUNING":
            # Prefix tuning is handled by the base model itself (pre_seq_len).
            self.model = model
        else:
            peft_config = PEFT_TYPE_TO_CONFIG_MAPPING[peft_type](inference_mode=False, **peft_config_dict)
            # model = prepare_model_for_int8_training(model)
            self.model = get_peft_model(model, peft_config)
        # Optionally resume adapter weights from a previous run.
        if self.common_config.input.get("adapter_model", {}):
            adapter_path = self.common_config.input.get("adapter_model")["path"]
            if peft_type == "PREFIX_TUNING":
                # P-tuning v2
                prefix_state_dict = torch.load(os.path.join(adapter_path, "pytorch_model.bin"))
                new_prefix_state_dict = {}
                for k, v in prefix_state_dict.items():
                    # Strip the module prefix so keys match the sub-module.
                    if k.startswith("transformer.prefix_encoder."):
                        new_prefix_state_dict[k[len("transformer.prefix_encoder."):]] = v
                self.model.transformer.prefix_encoder.load_state_dict(new_prefix_state_dict)
            else:
                self.model = PeftModel.from_pretrained(self.model,
                                                       adapter_path,
                                                       adapter_name="default",
                                                       is_trainable=True)
            logger.info("Load adapter model.")
        if peft_type == "PREFIX_TUNING":
            # Keep the trainable prefix encoder in fp32 even when the base
            # model runs in half precision.
            if not self.common_config.train_params["trainer"].get("no_cuda"):
                self.model.transformer.prefix_encoder.float()
        logger.info(self.model)
        self.train_dataset, self.val_dataset = self._set_dataset()
        self.data_collator = self._set_data_collator()
        trainer_conf = self.common_config.train_params["trainer"]
        # max_steps is forced to -1 so num_train_epochs controls duration.
        trainer_conf["max_steps"] = -1
        # trainer_conf["save_strategy"] = 'steps'
        trainer_conf["save_steps"] = self.common_config.train_params["aggregation"]["agg_steps"]
        trainer_conf["output_dir"] = self.common_config.output_dir
        self.training_args = TrainingArguments(**trainer_conf)
        self.trainer_conf = trainer_conf
        if peft_type == "PREFIX_TUNING":
            self.training_args.local_rank = -1
    def _set_data_collator(self):
        """Build the seq2seq collator (labels padded with -100, i.e. ignored)."""
        data_collator = DataCollatorForSeq2Seq(
            self.tokenizer,
            model=None,
            label_pad_token_id=-100,
            pad_to_multiple_of=None,
            padding=True
        )
        return data_collator
    def _set_dataset(self):
        """Build QADataset train/val datasets from the configured files.

        Returns:
            Tuple (train_dataset, val_dataset); either may be None when the
            corresponding input is not configured.
        """
        dataset_conf = self.common_config.train_params["dataset"]
        train_dataset, val_dataset = None, None
        if self.common_config.input_trainset:
            file_name_or_path = os.path.join(
                self.common_config.input_trainset[0].get("path")
            )
            train_dataset = QADataset(
                file_name_or_path=file_name_or_path,
                tokenizer=self.tokenizer,
                max_src_length=dataset_conf["max_src_length"],
                max_dst_length=dataset_conf["max_dst_length"],
                prompt_pattern=dataset_conf["prompt_pattern"],
                key_query=dataset_conf.get("key_query", "input"),
                key_answer=dataset_conf.get("key_answer", "output")
            )
        if self.common_config.input_valset:
            file_name_or_path = os.path.join(
                self.common_config.input_valset[0].get("path")
            )
            val_dataset = QADataset(
                file_name_or_path=file_name_or_path,
                tokenizer=self.tokenizer,
                max_src_length=dataset_conf["max_src_length"],
                max_dst_length=dataset_conf["max_dst_length"],
                prompt_pattern=dataset_conf["prompt_pattern"],
                key_query=dataset_conf.get("key_query", "input"),
                key_answer=dataset_conf.get("key_answer", "output")
            )
        return train_dataset, val_dataset
| 10,798 | 42.898374 | 114 | py |
XFL | XFL-master/python/algorithm/framework/horizontal/chatglm/callback.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import inspect
from pathlib import Path
from typing import Union
from copy import deepcopy
import torch
import torch.nn as nn
import transformers
from accelerate import (
dispatch_model, infer_auto_device_map
)
from accelerate.utils import get_balanced_memory
from accelerate.hooks import (
AlignDevicesHook, add_hook_to_module, remove_hook_from_submodules
)
from transformers import (
TrainerCallback, TrainingArguments, TrainerState, TrainerControl
)
from peft import PeftModel
from peft.utils import (
get_peft_model_state_dict, set_peft_model_state_dict, PromptLearningConfig
)
from algorithm.core.horizontal.aggregation.api import (
get_aggregation_root_inst, get_aggregation_leaf_inst
)
from common.utils.logger import logger
from service.fed_config import FedConfig
# def is_nan_exists(state_dict):
# flag = False
# for k, v in state_dict.items():
# if torch.isnan(v).any():
# flag = True
# logger.warning(f"Parameter {k} contains nan")
# break
# return flag
class AssistTrainerCallback(TrainerCallback):
    """Trainer callback for the aggregation-root party.

    At each scheduled step it gathers adapter weights from the leaves,
    aggregates them (mixing in its own), broadcasts the result, and saves a
    checkpoint.
    """
    def __init__(self,
                 agg_steps: int,
                 sec_conf: dict,
                 root_id: str,
                 leaf_ids: list[str],
                 init_params: bool = False,
                 peft_type: str = "LORA"):
        """
        Args:
            agg_steps: aggregation interval as a fraction of the run (0, 1].
            sec_conf: encryption config for the aggregation channel.
            root_id: party id of the aggregation root (this party).
            leaf_ids: party ids of the label trainers.
            init_params: re-initialize weights at train begin (no pretrain).
            peft_type: PEFT adapter type, e.g. "LORA" or "PREFIX_TUNING".
        """
        super().__init__()
        self.agg_steps = agg_steps
        self.agg_steps_list = []
        assert 0 < agg_steps <= 1
        self.agg_inst = get_aggregation_root_inst(sec_conf, root_id, leaf_ids)
        self.init_params = init_params
        self.peft_type = peft_type
        # self.latest_adapters_weights = None
    def on_train_begin(self,
                       args: TrainingArguments,
                       state: TrainerState,
                       control: TrainerControl,
                       model: Union[transformers.PreTrainedModel, torch.nn.Module],
                       **kwargs):
        """Optionally re-init weights, disable periodic log/save, broadcast."""
        if self.init_params:
            for m in model.modules():
                if isinstance(m, nn.Conv2d):
                    torch.nn.init.xavier_normal_(m.weight.data)
                    if m.bias is not None:
                        m.bias.data.zero_()
                elif isinstance(m, nn.BatchNorm2d):
                    m.weight.data.fill_(1)
                    m.bias.data.zero_()
                elif isinstance(m, nn.Linear):
                    torch.nn.init.normal_(m.weight.data, 0, 0.01)
                    if m.bias is not None:
                        m.bias.data.zero_()
                elif isinstance(m, nn.Embedding):
                    torch.nn.init.uniform_(m.weight.data)
        # Push Trainer's own logging/saving beyond max_steps so this callback
        # fully controls when checkpoints are written.
        args.logging_steps = state.max_steps + 1
        args.save_steps = state.max_steps + 1
        # Initial broadcast so all parties start from the same adapter state.
        adapters_weights = get_adapter_state_dict(model, self.peft_type)
        self.agg_inst.broadcast(adapters_weights)
        # self.latest_adapters_weights = adapters_weights
    # def on_step_begin(self,
    #                   args: TrainingArguments,
    #                   state: TrainerState,
    #                   control: TrainerControl,
    #                   model: Union[transformers.PreTrainedModel, torch.nn.Module],
    #                   **kwargs):
    #     for k, v in model.state_dict().items():
    #         if torch.isnan(v).any():
    #             # set nan to 0
    #             v[v != v] = 0
    #             logger.warning(f"Parameter {k} contains nan, replace nan to 0")
    #     control.should_log = False
    def on_step_end(self,
                    args: TrainingArguments,
                    state: TrainerState,
                    control: TrainerControl,
                    model: Union[transformers.PreTrainedModel, torch.nn.Module],
                    tokenizer,
                    **kwargs):
        """On scheduled steps: aggregate, broadcast and save a checkpoint."""
        # Trainer saves model after check on_step_end
        if not self.agg_steps_list:
            # Lazily enumerate sync fractions (needs state.max_steps, which is
            # only known once training has started).
            i = self.agg_steps
            while i < 1:
                self.agg_steps_list.append(round(i, 4))
                i += self.agg_steps
            self.agg_steps_list.append(1)
            self.steps_list = [math.ceil(i * state.max_steps)
                               for i in self.agg_steps_list]
            assert len(self.steps_list) == len(set(self.steps_list))
            logger.info(f"Aggergate model by steps: {self.agg_steps_list}")
        if state.global_step in self.steps_list:
            idx = self.steps_list.index(state.global_step)
            # Aggregation weight; currently constant 1 on both branches
            # (sample-based weighting is disabled).
            if idx == 0:
                factor = 1
            else:
                factor = 1
            adapters_weights = get_adapter_state_dict(model, self.peft_type)
            logger.info(
                f"gather and aggregating..., global_step={state.global_step}")
            new_adapters_weights = self.agg_inst.aggregate(
                parameters=adapters_weights, parameters_weight=factor)
            set_adapter_state_dict(model, self.peft_type, new_adapters_weights)
            logger.info(f"broadcasting..., global_step={state.global_step}")
            self.agg_inst.broadcast(new_adapters_weights)
            if args.output_dir and args.save_strategy != 'no':
                if self.peft_type != "PREFIX_TUNING":
                    model.save_pretrained(save_directory=Path(args.output_dir) / f"checkpoint-{str(self.agg_steps_list[idx])}")
                else:
                    model.save_pretrained(save_directory=Path(args.output_dir) / f"checkpoint-{str(self.agg_steps_list[idx])}",
                                          state_dict=get_adapter_state_dict(model, self.peft_type))
            control.should_log = True
class LabelTrainerCallback(TrainerCallback):
    """Trainer callback for label-trainer (leaf) parties.

    At each scheduled step it uploads local adapter weights to the root,
    downloads the aggregated result, loads it into the model, and saves a
    checkpoint.  In standalone mode (no assist trainer) all sync steps are
    skipped.
    """
    def __init__(self,
                 agg_steps: Union[float, int],
                 sec_conf: dict,
                 root_id: str,
                 leaf_ids: list[str],
                 init_params: bool = False,
                 peft_type: str = "LORA"):
        """
        Args:
            agg_steps: aggregation interval as a fraction of the run (0, 1].
            sec_conf: encryption config for the aggregation channel.
            root_id: party id of the aggregation root.
            leaf_ids: party ids of the label trainers.
            init_params: re-initialize weights at train begin (no pretrain).
            peft_type: PEFT adapter type, e.g. "LORA" or "PREFIX_TUNING".
        """
        super().__init__()
        self.agg_steps = agg_steps
        self.agg_steps_list = []
        assert 0 < agg_steps <= 1
        self.is_standalone = False if FedConfig.get_assist_trainer() else True
        if not self.is_standalone:
            self.agg_inst = get_aggregation_leaf_inst(sec_conf, root_id, leaf_ids)
        self.init_params = init_params
        self.peft_type = peft_type
    def on_train_begin(self,
                       args: TrainingArguments,
                       state: TrainerState,
                       control: TrainerControl,
                       model: Union[transformers.PreTrainedModel, torch.nn.Module],
                       train_dataloader,
                       **kwargs):
        """Optionally re-init weights, disable periodic log/save, pull state."""
        if self.init_params:
            for m in model.modules():
                if isinstance(m, nn.Conv2d):
                    torch.nn.init.xavier_normal_(m.weight.data)
                    if m.bias is not None:
                        m.bias.data.zero_()
                elif isinstance(m, nn.BatchNorm2d):
                    m.weight.data.fill_(1)
                    m.bias.data.zero_()
                elif isinstance(m, nn.Linear):
                    torch.nn.init.normal_(m.weight.data, 0, 0.01)
                    if m.bias is not None:
                        m.bias.data.zero_()
                elif isinstance(m, nn.Embedding):
                    torch.nn.init.uniform_(m.weight.data)
        # Push Trainer's own logging/saving beyond max_steps so this callback
        # fully controls when checkpoints are written.
        args.logging_steps = state.max_steps + 1
        args.save_steps = state.max_steps + 1
        if not self.is_standalone:
            # Start from the root's broadcast adapter weights.
            new_adapters_weights = self.agg_inst.download()
            set_adapter_state_dict(model, self.peft_type, new_adapters_weights)
    # def on_step_begin(self,
    #                   args: TrainingArguments,
    #                   state: TrainerState,
    #                   control: TrainerControl,
    #                   model: Union[transformers.PreTrainedModel, torch.nn.Module],
    #                   **kwargs):
    #     for k, v in model.state_dict().items():
    #         if torch.isnan(v).any():
    #             # set nan to 0
    #             v[v != v] = 0
    #             logger.warning(f"Parameter {k} contains nan, replace nan to 0")
    #     control.should_log = False
    def on_step_end(self,
                    args: TrainingArguments,
                    state: TrainerState,
                    control: TrainerControl,
                    model: Union[transformers.PreTrainedModel, torch.nn.Module],
                    **kwargs):
        """On scheduled steps: upload, download aggregated state, save."""
        # if is_nan_exists(model.state_dict()):
        #     logger.warning(f"Nan exists!")
        # Trainer saves model after check on_step_end
        if not self.agg_steps_list:
            # Lazily enumerate sync fractions (needs state.max_steps, which is
            # only known once training has started); must mirror the root side.
            i = self.agg_steps
            while i < 1:
                self.agg_steps_list.append(round(i, 4))
                i += self.agg_steps
            self.agg_steps_list.append(1)
            self.steps_list = [math.ceil(i * state.max_steps)
                               for i in self.agg_steps_list]
            if len(self.steps_list) != len(set(self.steps_list)):
                raise ValueError(f"agg_steps is too small, try a larger one.")
            logger.info(f"Aggergate model by steps: {self.agg_steps_list}")
        if state.global_step in self.steps_list:
            idx = self.steps_list.index(state.global_step)
            if not self.is_standalone:
                # Aggregation weight; currently constant 1 on both branches
                # (the sample-based weighting below is disabled).
                if idx == 0:
                    factor = 1
                    # factor = self.agg_steps_list[0] * \
                    #     args.gradient_accumulation_steps * args.train_batch_size
                else:
                    factor = 1
                    # factor = (self.agg_steps_list[idx] - self.agg_steps_list[idx-1]) * \
                    #     args.gradient_accumulation_steps * args.train_batch_size
                adapters_weights = get_adapter_state_dict(model, self.peft_type)
                logger.info(f"uploading..., global_step={state.global_step}")
                self.agg_inst.upload(adapters_weights, factor)
                logger.info(f"downloading..., global_step={state.global_step}")
                new_adapters_weights = self.agg_inst.download()
                set_adapter_state_dict(model, self.peft_type, new_adapters_weights)
            if args.output_dir and args.save_strategy != 'no':
                if self.peft_type != "PREFIX_TUNING":
                    model.save_pretrained(save_directory=Path(args.output_dir) / f"checkpoint-{str(self.agg_steps_list[idx])}")
                else:
                    model.save_pretrained(save_directory=Path(args.output_dir) / f"checkpoint-{str(self.agg_steps_list[idx])}",
                                          state_dict=get_adapter_state_dict(model, self.peft_type))
            control.should_log = True
def get_adapter_state_dict(model: PeftModel, peft_type: str, **kwargs):
    """Extract the adapter weights of *model* as a CPU state dict.

    For "PREFIX_TUNING" only the parameters with ``requires_grad`` set are
    collected; for every other PEFT type the active adapter's weights are
    collected via ``get_peft_model_state_dict``.  Tensors are deep-copied to
    CPU so the result can be serialized and aggregated safely.
    """
    if peft_type == "PREFIX_TUNING":
        full_state = model.state_dict()
        adapters_weights = {
            name: deepcopy(full_state[name]).to('cpu')
            for name, param in model.named_parameters()
            if param.requires_grad
        }
    else:
        adapters_weights = get_peft_model_state_dict(
            model,
            state_dict=kwargs.get("state_dict", None),
            adapter_name=model.active_adapter,
        )
        for name in adapters_weights:
            adapters_weights[name] = deepcopy(adapters_weights[name]).to('cpu')
    return adapters_weights
def set_adapter_state_dict(model: PeftModel, peft_type: str, adapters_weights: dict, **kwargs):
    """Load *adapters_weights* back into *model*.

    For "PREFIX_TUNING" the weights are merged into the full state dict and
    reloaded.  For other PEFT types they are loaded via
    ``set_peft_model_state_dict``; if the model was dispatched with weights
    offloaded to cpu/disk, it is re-dispatched across devices afterwards
    (mirrors peft's own ``PeftModel.from_pretrained`` dispatch logic).

    Keyword Args:
        device_map, max_memory, offload_folder, offload_index: forwarded to
            the accelerate dispatch path when re-dispatching is needed.
    """
    if peft_type == "PREFIX_TUNING":
        state_dict = model.state_dict()
        state_dict.update(adapters_weights)
        model.load_state_dict(state_dict)
    else:
        adapter_name = model.active_adapter
        # load the weights into the model
        set_peft_model_state_dict(model, adapters_weights, adapter_name=adapter_name)
        # Re-dispatch only when some weights live on cpu/disk and exactly one
        # adapter is configured.
        if (
            (getattr(model, "hf_device_map", None) is not None)
            and (len(set(model.hf_device_map.values()).intersection({"cpu", "disk"})) > 0)
            and len(model.peft_config) == 1
        ):
            device_map = kwargs.get("device_map", "auto")
            max_memory = kwargs.get("max_memory", None)
            offload_dir = kwargs.get("offload_folder", None)
            offload_index = kwargs.get("offload_index", None)
            dispatch_model_kwargs = {}
            # Safety checker for previous `accelerate` versions
            # `offload_index` was introduced in https://github.com/huggingface/accelerate/pull/873/
            if "offload_index" in inspect.signature(dispatch_model).parameters:
                dispatch_model_kwargs["offload_index"] = offload_index
            no_split_module_classes = model._no_split_modules
            if device_map != "sequential":
                max_memory = get_balanced_memory(
                    model,
                    max_memory=max_memory,
                    no_split_module_classes=no_split_module_classes,
                    low_zero=(device_map == "balanced_low_0"),
                )
            if isinstance(device_map, str):
                device_map = infer_auto_device_map(
                    model, max_memory=max_memory, no_split_module_classes=no_split_module_classes
                )
            dispatch_model(
                model,
                device_map=device_map,
                offload_dir=offload_dir,
                **dispatch_model_kwargs,
            )
            hook = AlignDevicesHook(io_same_device=True)
            if isinstance(model.peft_config[adapter_name], PromptLearningConfig):
                remove_hook_from_submodules(model.prompt_encoder)
            add_hook_to_module(model.get_base_model(), hook)
        # Set model in evaluation mode to deactivate Dropout modules by default
        # model.eval()
| 14,501 | 41.527859 | 127 | py |
XFL | XFL-master/python/algorithm/framework/horizontal/bert/label_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from algorithm.core.horizontal.template.agg_type import \
register_agg_type_for_label_trainer
from common.utils.logger import logger
from .common import Common
from functools import partial
class HorizontalBertLabelTrainer(Common):
    """Label-trainer side of horizontal BERT training."""
    def __init__(self, train_conf: dict):
        super().__init__(train_conf)
        agg_type = list(self.common_config.aggregation["method"].keys())[0]
        self.register_hook(
            place="after_train_loop", rank=1,
            func=partial(self.val_loop, "train"), desc="validation on trainset"
        )
        register_agg_type_for_label_trainer(self, 'torch', agg_type)
    def train_loop(self):
        """Run one local epoch and record the mean batch loss in the context."""
        self.model.train()
        lossfunc = list(self.lossfunc.values())[0]
        optimizer = list(self.optimizer.values())[0]
        scheduler = list(self.lr_scheduler.values())[0] if self.lr_scheduler.values() else None
        total_loss = 0
        for input_ids, token_type_ids, attention_masks, labels in self.train_dataloader:
            # The model returns a 3-tuple; only the last element (pred) is used.
            _, _, pred = self.model(input_ids, token_type_ids, attention_masks, labels)
            batch_loss = lossfunc(pred, labels)
            optimizer.zero_grad()
            batch_loss.backward()
            optimizer.step()
            total_loss += batch_loss.item()
        mean_loss = total_loss / len(self.train_dataloader)
        # Scheduler is stepped once per epoch, after all batches.
        if scheduler:
            scheduler.step()
        self.context["train_loss"] = mean_loss
        logger.info(f"Train loss: {mean_loss}")
| 2,106 | 38.754717 | 111 | py |
XFL | XFL-master/python/algorithm/framework/horizontal/bert/assist_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from .common import Common
from algorithm.core.horizontal.template.agg_type import register_agg_type_for_assist_trainer
class HorizontalBertAssistTrainer(Common):
    """Assist-trainer (aggregator) side of horizontal BERT training."""

    def __init__(self, train_conf: dict):
        super().__init__(train_conf)
        agg_method = next(iter(self.common_config.aggregation["method"]))
        register_agg_type_for_assist_trainer(self, 'torch', agg_method)
        # Hook ranks order the work after each epoch: checkpoint, then validate.
        hooks = (
            ("after_local_epoch", 1, partial(self._save_model, False), "save model "),
            ("after_local_epoch", 2, partial(self.val_loop, "val"), "validation on valset"),
            ("after_global_epoch", 1, partial(self._save_model, True), "save final model"),
        )
        for place, rank, func, desc in hooks:
            self.register_hook(place=place, rank=rank, func=func, desc=desc)

    def train_loop(self):
        # The assist trainer only aggregates; it performs no local training.
        pass
| 1,529 | 42.714286 | 92 | py |
XFL | XFL-master/python/algorithm/framework/horizontal/bert/common.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import torch
import pandas as pd
import numpy as np
from transformers import BertTokenizer
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset
from python.algorithm.model.bert import BertForSst2Torch
from common.utils.logger import logger
from algorithm.core.horizontal.template.torch.base import BaseTrainer
from common.utils.config_sync import ConfigSynchronizer
from common.checker.x_types import All
from common.evaluation.metrics import CommonMetrics
class Common(BaseTrainer):
    """Shared logic for the horizontal BERT (SST-2) label/assist trainers.

    Handles config synchronization across parties, model and dataloader
    construction, and the shared validation loop (metrics, CSV output,
    early stopping).
    """

    def __init__(self, train_conf: dict) -> None:
        # These config sections are synchronized from the assist trainer so
        # that every party trains with identical hyper-parameters.
        sync_rule = {
            "model_info": All(),
            "train_info": {
                "interaction_params": All(),
                "train_params": {
                    "global_epoch": All(),
                    "aggregation": All(),
                    "encryption": All(),
                    "optimizer": All(),
                    "lr_scheduler": All(),
                    "lossfunc": All(),
                    "metric": All(),
                    "early_stopping": All()
                }
            }
        }
        train_conf = ConfigSynchronizer(train_conf).sync(sync_rule)
        super().__init__(train_conf)

    def _set_model(self) -> nn.Module:
        """Build the BERT SST-2 classifier from the model config."""
        model_config = self.common_config.model_info.get("config")
        model = BertForSst2Torch(**model_config)
        return model

    def _read_data(self, input_dataset):
        """Read the first configured dataset as (sentences, labels) arrays.

        Returns:
            Tuple of numpy arrays (sentence values, label values), or None
            when no dataset is configured.
        """
        if len(input_dataset) == 0:
            return None
        conf = input_dataset[0]
        path = os.path.join(conf['path'], conf['name'])
        # SST-2 style TSV with "sentence" and "label" columns.
        raw_data = pd.read_csv(path, sep='\t')
        return raw_data["sentence"].values, raw_data["label"].values

    def _encode_examples(self, data, tokenizer, max_length=512):
        """Tokenize (sentences, labels) into a TensorDataset of BERT inputs.

        Each sentence is padded/truncated to max_length with special tokens.
        """
        input_ids, token_type_ids, attention_masks, labels = [], [], [], []
        for feature, label in zip(*data):
            bert_input = tokenizer.encode_plus(feature,
                                               add_special_tokens=True,
                                               max_length=max_length,
                                               padding='max_length',
                                               return_token_type_ids=True,
                                               return_attention_mask=True)
            input_ids.append(bert_input['input_ids'])
            token_type_ids.append(bert_input['token_type_ids'])
            attention_masks.append(bert_input['attention_mask'])
            labels.append(label)
        return TensorDataset(torch.tensor(input_ids), torch.tensor(token_type_ids),
                             torch.tensor(attention_masks), torch.tensor(labels))

    def _make_dataloader(self, input_conf, batch_size_key, shuffle):
        """Shared builder for the train/val dataloaders.

        Factors out the code previously duplicated between
        _set_train_dataloader and _set_val_dataloader; the tokenizer is now
        only instantiated when a dataset is actually configured.

        Returns None when input_conf references no dataset.
        """
        data = self._read_data(input_conf)
        if data is None:
            return None
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
        dataset = self._encode_examples(data, tokenizer)
        batch_size = self.common_config.train_params.get(batch_size_key)
        return DataLoader(dataset, batch_size, shuffle=shuffle)

    def _set_train_dataloader(self):
        """Training dataloader (shuffled); None when no trainset configured."""
        return self._make_dataloader(
            self.common_config.input_trainset, "train_batch_size", True)

    def _set_val_dataloader(self):
        """Validation dataloader (unshuffled); None when no valset configured."""
        return self._make_dataloader(
            self.common_config.input_valset, "val_batch_size", False)

    def val_loop(self, dataset_type: str = "val", context: dict = None):
        """Evaluate on the train or val set, log metrics, drive early stopping.

        Args:
            dataset_type: "val" or "train"; selects the dataloader.
            context: unused; kept for hook-signature compatibility. Default
                changed from a mutable {} to None (no behavior change, the
                argument is never read).

        Raises:
            ValueError: if dataset_type is neither "val" nor "train".
        """
        self.model.eval()
        val_loss = 0
        val_predicts = []
        labels = []
        lossfunc_name = list(self.lossfunc.keys())[0]
        lossfunc = list(self.lossfunc.values())[0]
        if dataset_type == "val":
            dataloader = self.val_dataloader
        elif dataset_type == "train":
            dataloader = self.train_dataloader
        else:
            raise ValueError(f"dataset type {dataset_type} is not valid.")
        for input_ids, token_type_ids, attention_masks, label in dataloader:
            _, _, pred = self.model(
                input_ids, token_type_ids, attention_masks, label)
            loss = lossfunc(pred, label)
            val_predicts.append(pred.detach().cpu().squeeze(-1).numpy())
            val_loss += loss.item()
            labels.append(label.cpu().squeeze(-1).numpy())
        val_loss /= len(dataloader)
        labels: np.ndarray = np.concatenate(labels, axis=0)
        val_predicts: np.ndarray = np.concatenate(val_predicts, axis=0)
        # 1-D outputs are treated as binary probabilities, 2-D as class scores.
        if len(val_predicts.shape) == 1:
            val_predicts = np.array(val_predicts > 0.5, dtype=np.int32)
        elif len(val_predicts.shape) == 2:
            val_predicts = val_predicts.argmax(axis=-1)
        metrics_output = CommonMetrics._calc_metrics(
            metrics=self.metrics,
            labels=labels,
            val_predicts=val_predicts,
            lossfunc_name=lossfunc_name,
            loss=val_loss,
            dataset_type=dataset_type
        )
        global_epoch = self.context["g_epoch"]
        # Local epoch is only meaningful for in-epoch (trainset) validation.
        local_epoch = self.context["l_epoch"] if dataset_type == "train" else None
        CommonMetrics.save_metric_csv(
            metrics_output=metrics_output,
            output_config=self.common_config.output,
            global_epoch=global_epoch,
            local_epoch=local_epoch,
            dataset_type=dataset_type,
        )
        early_stop_flag = self.context["early_stop_flag"]
        if (self.common_config.save_frequency > 0) & \
                (dataset_type == "val") & (self.earlystopping.patience > 0):
            early_stop_flag = self.earlystopping(metrics_output, global_epoch)
            if early_stop_flag:
                # Map the best epoch to the nearest checkpointed epoch so that
                # a saved model actually exists for it.
                best_epoch = self.earlystopping.best_epoch
                closest_epoch = round(best_epoch / self.common_config.save_frequency) * \
                    self.common_config.save_frequency
                closest_epoch -= self.common_config.save_frequency \
                    if closest_epoch > global_epoch else 0
                self.context["early_stop_flag"] = True
                self.context["early_stop_epoch"] = closest_epoch
| 7,291 | 39.966292 | 99 | py |
XFL | XFL-master/python/algorithm/framework/horizontal/resnet_paddle/label_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from algorithm.core.horizontal.template.paddle.fedavg.label_trainer import FedAvgLabelTrainer
from common.utils.logger import logger
import numpy as np
from .common import Common
class HorizontalResnetPaddleLabelTrainer(Common, FedAvgLabelTrainer):
    """Label-trainer side of horizontal ResNet training (PaddlePaddle)."""

    def __init__(self, train_conf: dict):
        FedAvgLabelTrainer.__init__(self, train_conf)

    def train_loop(self):
        """Run one local training epoch and record the mean batch loss.

        Fix: removed a leftover `if lr_scheduler: optimizer` block whose body
        was a bare name expression with no effect (dead code).
        """
        self.model.train()
        train_losses = []
        loss_func = list(self.loss_func.values())[0]
        lr_scheduler = list(self.lr_scheduler.values())[0] if self.lr_scheduler.values() else None
        optimizer = list(self.optimizer.values())[0]
        for batch, (feature, label) in enumerate(self.train_dataloader):
            pred = self.model(feature)
            loss = loss_func(pred, label)
            optimizer.clear_grad()
            loss.backward()
            optimizer.step()
            train_losses.append(loss.numpy())
        train_loss = np.mean(train_losses)
        # Advance the learning-rate schedule once per local epoch.
        if lr_scheduler:
            lr_scheduler.step()
        self.context["train_loss"] = train_loss
        logger.info(f"Train loss: {train_loss}")
| 1,796 | 36.4375 | 98 | py |
XFL | XFL-master/python/algorithm/framework/horizontal/resnet_paddle/assist_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from algorithm.core.horizontal.template.paddle.fedavg.assist_trainer import FedAvgAssistTrainer
from functools import partial
from .common import Common
class HorizontalResnetPaddleAssistTrainer(Common, FedAvgAssistTrainer):
    """Assist-trainer (aggregator) side of horizontal ResNet (Paddle)."""

    def __init__(self, train_conf: dict):
        FedAvgAssistTrainer.__init__(self, train_conf)
        # After every local epoch: validate (rank 2), then checkpoint (rank 3).
        hooks = (
            (2, partial(self.val_loop, "val"), "validation on valset"),
            (3, self._save_model, "save model"),
        )
        for rank, func, desc in hooks:
            self.register_hook(place="after_local_epoch", rank=rank,
                               func=func, desc=desc)

    def train_loop(self):
        # Aggregation only; no local training on the assist trainer.
        pass
| 1,272 | 38.78125 | 95 | py |
XFL | XFL-master/python/algorithm/framework/horizontal/resnet_paddle/common.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import paddle
import numpy as np
from algorithm.core.data_io import CsvReader, NpzReader
from algorithm.model.resnet_paddle import ResNet
from common.utils.logger import logger
from paddle.io import TensorDataset, DataLoader
import paddle.vision.transforms as transforms
import paddle.nn as nn
from PIL import Image
class Common():
    """Shared model/data/validation logic for the horizontal ResNet (Paddle)
    trainers: model construction, CIFAR-style dataloaders and the val loop.
    """

    # CIFAR-10 per-channel mean/std, shared by the train and eval pipelines.
    _NORM_MEAN = (0.4914, 0.4822, 0.4465)
    _NORM_STD = (0.2023, 0.1994, 0.2010)

    def _set_model(self) -> nn.Layer:
        """Build the ResNet on the configured device."""
        paddle.device.set_device(self.device)
        model_config = self.model_info.get("config")
        model = ResNet(num_classes=model_config["num_classes"], layers=model_config["layers"])
        return model

    def _read_data(self, input_dataset):
        """Return a reader for the first configured dataset, or None.

        Supports "csv" (CsvReader) and "npz" (NpzReader) dataset types.
        """
        if len(input_dataset) == 0:
            return None
        conf = input_dataset[0]
        if conf["type"] == "csv":
            path = os.path.join(conf['path'], conf['name'])
            has_label = conf["has_label"]
            has_id = conf['has_id']
            return CsvReader(path, has_id, has_label)
        elif conf["type"] == "npz":
            path = os.path.join(conf['path'], conf['name'])
            return NpzReader(path)
        else:
            return None

    @staticmethod
    def _make_img_collate_fn(train: bool):
        """Build a collate_fn applying CIFAR-style image transforms.

        Factors out the two near-identical collate functions previously
        nested in the dataloader setters; the transform pipeline is now
        composed once instead of being rebuilt on every batch.
        """
        steps = []
        if train:
            # Standard CIFAR augmentation for training only.
            steps += [transforms.RandomCrop(32, padding=4),
                      transforms.RandomHorizontalFlip()]
        steps += [transforms.ToTensor(),
                  transforms.Normalize(Common._NORM_MEAN, Common._NORM_STD)]
        transform = transforms.Compose(steps)

        def img_collate_fn(batch):
            imgs = []
            labels = []
            for feature, label in batch:
                img = Image.fromarray(feature.numpy().astype(np.uint8))
                imgs.append(transform(img))
                labels.append(label)
            return paddle.stack(imgs, 0), paddle.stack(labels, 0)

        return img_collate_fn

    def _set_train_dataloader(self):
        """Training dataloader (shuffled, augmented); None when no trainset."""
        train_data = self._read_data(self.input_trainset)
        train_dataloader = None
        if train_data:
            trainset = TensorDataset([paddle.to_tensor(train_data.features(), dtype="float64"),
                                      paddle.to_tensor(train_data.label(), dtype="int64")])
            batch_size = self.train_params.get("batch_size", 64)
            train_dataloader = DataLoader(trainset, batch_size=batch_size, shuffle=True,
                                          collate_fn=self._make_img_collate_fn(train=True))
        return train_dataloader

    def _set_val_dataloader(self):
        """Validation dataloader; None when no valset configured.

        Fix: validation is no longer shuffled (shuffle=False) so evaluation
        order is deterministic, consistent with the other trainers; computed
        metrics are unaffected since they aggregate over the whole set.
        """
        val_data = self._read_data(self.input_valset)
        val_dataloader = None
        if val_data:
            valset = TensorDataset([paddle.to_tensor(val_data.features(), dtype="float64"),
                                    paddle.to_tensor(val_data.label(), dtype="int64")])
            batch_size = self.train_params.get("batch_size", 64)
            val_dataloader = DataLoader(valset, batch_size=batch_size, shuffle=False,
                                        collate_fn=self._make_img_collate_fn(train=False))
        return val_dataloader

    def val_loop(self, dataset_type: str = "validation", context: dict = None):
        """Evaluate on the train or validation set and log the metrics.

        Args:
            dataset_type: "validation"/"val" or "train"; selects the dataloader.
            context: unused; kept for hook-signature compatibility. Default
                changed from a mutable {} to None (argument is never read).

        Raises:
            ValueError: if dataset_type is not recognized.
        """
        self.model.eval()
        val_losses = []
        val_predicts = []
        labels = []
        metric_output = {}
        loss_func_name = list(self.loss_func.keys())[0]
        loss_func = list(self.loss_func.values())[0]
        if dataset_type in ["validation", "val"]:
            dataloader = self.val_dataloader
        elif dataset_type == "train":
            dataloader = self.train_dataloader
        else:
            raise ValueError(f"dataset type {dataset_type} is not valid.")
        for batch, (feature, label) in enumerate(dataloader):
            pred = self.model(feature)
            loss = loss_func(pred, label)
            val_predicts.append(pred.squeeze(-1).numpy())
            val_losses.append(loss.numpy())
            labels.append(label.squeeze(-1).numpy())
        val_loss = np.mean(val_losses)
        metric_output[loss_func_name] = val_loss
        val_predicts = np.concatenate(val_predicts, axis=0)
        labels = np.concatenate(labels, axis=0)
        # 1-D outputs are treated as binary probabilities, 2-D as class scores.
        if len(val_predicts.shape) == 1:
            val_predicts = np.array(val_predicts > 0.5, dtype=np.int32)
        elif len(val_predicts.shape) == 2:
            val_predicts = val_predicts.argmax(axis=-1)
        metrics_conf: dict = self.train_params["metric_config"]
        for method in self.metrics:
            metric_output[method] = self.metrics[method](labels, val_predicts, **metrics_conf[method])
        logger.info(f"Metrics on {dataset_type} set: {metric_output}")
| 5,692 | 38.262069 | 148 | py |
XFL | XFL-master/python/algorithm/framework/horizontal/gcn_mol/label_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from algorithm.core.horizontal.template.agg_type import register_agg_type_for_label_trainer
from common.utils.logger import logger
from .common import Common
from functools import partial
class HorizontalGcnMolLabelTrainer(Common):
    """Label-trainer side of horizontal GCN molecular property prediction."""

    def __init__(self, train_conf: dict):
        super().__init__(train_conf)
        # The aggregation method name is the single key of the configured mapping.
        agg_method = next(iter(self.common_config.aggregation["method"]))
        self.register_hook(
            place="after_train_loop", rank=1,
            func=partial(self.val_loop, "train"), desc="validation on trainset"
        )
        register_agg_type_for_label_trainer(self, 'torch', agg_method)

    def train_loop(self):
        """Run one local epoch over molecule graphs; mean batch loss -> context."""
        self.model.train()
        lossfunc = next(iter(self.lossfunc.values()))
        optimizer = next(iter(self.optimizer.values()))
        schedulers = list(self.lr_scheduler.values())
        lr_scheduler = schedulers[0] if schedulers else None
        total_loss = 0.0
        for smiles, bg, labels, masks in self.train_dataloader:
            # Node features were attached by the featurizer under key 'h'.
            node_feats = bg.ndata.pop('h')
            logits = self.model(bg, node_feats)
            loss = lossfunc(logits, labels.reshape((-1, 1)))
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            total_loss += loss.item()
        train_loss = total_loss / len(self.train_dataloader)
        # Advance the learning-rate schedule once per local epoch.
        if lr_scheduler:
            lr_scheduler.step()
        self.context["train_loss"] = train_loss
        logger.info(f"Train loss: {train_loss}")
| 2,116 | 36.803571 | 91 | py |
XFL | XFL-master/python/algorithm/framework/horizontal/gcn_mol/assist_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from .common import Common
from algorithm.core.horizontal.template.agg_type import register_agg_type_for_assist_trainer
class HorizontalGcnMolAssistTrainer(Common):
    """Assist-trainer (aggregator) side of horizontal GCN molecule training."""

    def __init__(self, train_conf: dict):
        super().__init__(train_conf)
        agg_method = next(iter(self.common_config.aggregation["method"]))
        register_agg_type_for_assist_trainer(self, 'torch', agg_method)
        # Hook ranks order the work after each epoch: checkpoint, then validate.
        hooks = (
            ("after_local_epoch", 1, partial(self._save_model, False), "save model "),
            ("after_local_epoch", 2, partial(self.val_loop, "val"), "validation on valset"),
            ("after_global_epoch", 1, partial(self._save_model, True), "save final model"),
        )
        for place, rank, func, desc in hooks:
            self.register_hook(place=place, rank=rank, func=func, desc=desc)

    def train_loop(self):
        # The assist trainer only aggregates; it performs no local training.
        pass
| 1,528 | 42.685714 | 92 | py |
XFL | XFL-master/python/algorithm/framework/horizontal/gcn_mol/common.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader, Dataset
import dgl
from dgllife.model import GCNPredictor
from dgllife.utils import SMILESToBigraph, CanonicalAtomFeaturizer
from algorithm.core.data_io import CsvReader, NpzReader
from common.utils.logger import logger
from algorithm.core.horizontal.template.torch.base import BaseTrainer
from common.utils.config_sync import ConfigSynchronizer
from common.checker.x_types import All
from common.evaluation.metrics import CommonMetrics
class SmilesDataset(Dataset):
    """In-memory dataset of aligned (SMILES string, DGLGraph, label) triples."""

    def __init__(self, smiles, graphs, labels):
        # The three sequences must be aligned index-by-index.
        assert len(smiles) == len(graphs), "Inconsistent lengths of smiles and graphs"
        assert len(graphs) == len(labels), "Inconsistent lengths of graphs and labels"
        self.smiles = smiles
        self.graphs = graphs
        self.labels = labels

    def __len__(self):
        """Number of molecules in the dataset."""
        return len(self.smiles)

    def __getitem__(self, index):
        """Return the (smiles, graph, label) triple at *index*."""
        return self.smiles[index], self.graphs[index], self.labels[index]
def collate_molgraphs(data):
    """Function from dgllife.examples.property_prediction.moleculenet.utils
    Batching a list of datapoints for dataloader.
    Parameters
    ----------
    data : list of 3-tuples or 4-tuples.
        Each tuple is for a single datapoint, consisting of
        a SMILES, a DGLGraph, all-task labels and optionally a binary
        mask indicating the existence of labels.
    Returns
    -------
    smiles : list
        List of smiles
    bg : DGLGraph
        The batched DGLGraph.
    labels : Tensor of dtype float32 and shape (B, T)
        Batched datapoint labels. B is len(data) and
        T is the number of total tasks.
    masks : Tensor of dtype float32 and shape (B, T)
        Batched datapoint binary mask, indicating the
        existence of labels.
    """
    # 3-tuples carry no mask; detect the tuple arity once from the first item.
    if len(data[0]) == 3:
        smiles, graphs, labels = map(list, zip(*data))
    else:
        smiles, graphs, labels, masks = map(list, zip(*data))
    bg = dgl.batch(graphs)
    # Zero-initialize any features created later on the batched graph.
    bg.set_n_initializer(dgl.init.zero_initializer)
    bg.set_e_initializer(dgl.init.zero_initializer)
    labels = torch.stack(labels, dim=0)
    if len(data[0]) == 3:
        # No masks supplied: treat every label as present.
        masks = torch.ones(labels.shape)
    else:
        masks = torch.stack(masks, dim=0)
    return smiles, bg, labels, masks
class Common(BaseTrainer):
    """Shared logic for the horizontal GCN molecule trainers.

    Handles config synchronization across parties, GCNPredictor construction,
    SMILES-to-graph dataloader building, and the shared validation loop.
    """

    def __init__(self, train_conf: dict) -> None:
        # These config sections are synchronized from the assist trainer so
        # that every party trains with identical hyper-parameters.
        sync_rule = {
            "model_info": All(),
            "train_info": {
                "interaction_params": All(),
                "train_params": {
                    "global_epoch": All(),
                    "aggregation": All(),
                    "encryption": All(),
                    "optimizer": All(),
                    "lr_scheduler": All(),
                    "lossfunc": All(),
                    "metric": All(),
                    "early_stopping": All()
                }
            }
        }
        train_conf = ConfigSynchronizer(train_conf).sync(sync_rule)
        super().__init__(train_conf)

    def _set_model(self) -> nn.Module:
        """Build the GCNPredictor from the model config."""
        model_config = self.common_config.model_info.get("config")
        model_params = self._prepare_model_params(model_config)
        model = GCNPredictor(**model_params)
        return model

    def _prepare_model_params(self, model_config):
        """Translate the flat model config into GCNPredictor keyword args.

        Per-layer options (hidden feats, activation, dropout, batchnorm,
        residual) are replicated num_gnn_layers times, as GCNPredictor
        expects one entry per GNN layer.
        """
        config = {}
        config['in_feats'] = model_config.get("input_dim", 100)
        num_gnn_layers = model_config.get('num_gnn_layers', 1)
        config['hidden_feats'] = [model_config.get(
            'gnn_hidden_feats', 64)] * num_gnn_layers
        # Robustness: .get avoids a KeyError when "activation" is absent;
        # anything unrecognized (or unset) falls back to relu, as before.
        activation = model_config.get('activation')
        if activation == 'relu':
            config['activation'] = [F.relu] * num_gnn_layers
        elif activation == 'tanh':
            config['activation'] = [F.tanh] * num_gnn_layers
        else:
            logger.info(f"Setting gnn activation to relu")
            config['activation'] = [F.relu] * num_gnn_layers
        config['dropout'] = [model_config.get("dropout", 0.5)] * num_gnn_layers
        config['batchnorm'] = [model_config.get(
            'batchnorm', False)] * num_gnn_layers
        config['residual'] = [model_config.get(
            "residual", False)] * num_gnn_layers
        config['predictor_hidden_feats'] = model_config.get(
            'predictor_hidden_dim', 64)
        config['n_tasks'] = model_config.get('n_tasks', 1)
        return config

    def _read_data(self, input_dataset):
        """Return a reader for the first configured dataset, or None.

        Supports "csv" (CsvReader) and "npz" (NpzReader) dataset types.
        """
        if len(input_dataset) == 0:
            return None
        conf = input_dataset[0]
        if conf["type"] == "csv":
            path = os.path.join(conf['path'], conf['name'])
            has_label = conf["has_label"]
            has_id = conf['has_id']
            return CsvReader(path, has_id, has_label)
        elif conf["type"] == "npz":
            path = os.path.join(conf['path'], conf['name'])
            return NpzReader(path)
        else:
            return None

    def _build_dataloader(self, input_conf, batch_size_key):
        """Shared builder: SMILES column -> DGL graphs -> DataLoader.

        Factors out the pipeline previously duplicated between
        _set_train_dataloader and _set_val_dataloader. Molecules whose graph
        construction fails are dropped (and their count logged).

        Returns None when input_conf references no dataset.
        """
        data = self._read_data(input_conf)
        if data is None:
            return None
        smiles = data.features(type="series").values.reshape((-1))
        labels = data.label().astype(np.int32)
        smiles_to_g = SMILESToBigraph(
            add_self_loop=True,
            node_featurizer=CanonicalAtomFeaturizer()
        )
        clean_smiles, clean_graphs, clean_labels = [], [], []
        failed_mols = []
        for i, smile in enumerate(smiles):
            g = smiles_to_g(smile)
            if g is not None:
                clean_smiles.append(smile)
                clean_graphs.append(g)
                clean_labels.append(labels[i])
            else:
                failed_mols.append((i, smile))
        # Previously the failed molecules were collected but silently ignored.
        if failed_mols:
            logger.info(
                f"Dropped {len(failed_mols)} molecules that failed graph construction.")
        dataset = SmilesDataset(
            clean_smiles, clean_graphs, torch.Tensor(clean_labels))
        batch_size = self.common_config.train_params.get(batch_size_key)
        # NOTE(review): shuffle=True kept from the original for both train and
        # val; consider shuffle=False for deterministic validation order.
        return DataLoader(
            dataset, batch_size=batch_size, shuffle=True,
            collate_fn=collate_molgraphs
        )

    def _set_train_dataloader(self):
        """Training dataloader; None when no trainset configured."""
        return self._build_dataloader(
            self.common_config.input_trainset, "train_batch_size")

    def _set_val_dataloader(self):
        """Validation dataloader; None when no valset configured."""
        return self._build_dataloader(
            self.common_config.input_valset, "val_batch_size")

    def val_loop(self, dataset_type: str = "val", context: dict = None):
        """Evaluate on the train or val set, log metrics, drive early stopping.

        Args:
            dataset_type: "val" or "train"; selects the dataloader.
            context: unused; kept for hook-signature compatibility. Default
                changed from a mutable {} to None (argument is never read).

        Raises:
            ValueError: if dataset_type is neither "val" nor "train".
        """
        self.model.eval()
        val_loss = 0
        val_predicts = []
        labels = []
        lossfunc_name = list(self.lossfunc.keys())[0]
        lossfunc = list(self.lossfunc.values())[0]
        if dataset_type == "val":
            dataloader = self.val_dataloader
        elif dataset_type == "train":
            dataloader = self.train_dataloader
        else:
            raise ValueError(f"dataset type {dataset_type} is not valid.")
        for smiles, bg, label, masks in dataloader:
            # Node features were attached by the featurizer under key 'h'.
            node_feats = bg.ndata.pop('h')
            logits = self.model(bg, node_feats)
            label = label.reshape((-1, 1))
            loss = lossfunc(logits, label)
            val_predicts.append(logits.detach().cpu().squeeze(-1).numpy())
            val_loss += loss.item()
            labels.append(label.cpu().squeeze(-1).numpy())
        val_loss /= len(dataloader)
        labels: np.ndarray = np.concatenate(labels, axis=0)
        val_predicts: np.ndarray = np.concatenate(val_predicts, axis=0)
        # 1-D outputs are treated as binary probabilities, 2-D as class scores.
        if len(val_predicts.shape) == 1:
            val_predicts = np.array(val_predicts > 0.5, dtype=np.int32)
        elif len(val_predicts.shape) == 2:
            val_predicts = val_predicts.argmax(axis=-1)
        metrics_output = CommonMetrics._calc_metrics(
            metrics=self.metrics,
            labels=labels,
            val_predicts=val_predicts,
            lossfunc_name=lossfunc_name,
            loss=val_loss,
            dataset_type=dataset_type
        )
        global_epoch = self.context["g_epoch"]
        # Local epoch is only meaningful for in-epoch (trainset) validation.
        local_epoch = self.context["l_epoch"] if dataset_type == "train" else None
        CommonMetrics.save_metric_csv(
            metrics_output=metrics_output,
            output_config=self.common_config.output,
            global_epoch=global_epoch,
            local_epoch=local_epoch,
            dataset_type=dataset_type,
        )
        early_stop_flag = self.context["early_stop_flag"]
        if (self.common_config.save_frequency > 0) & \
                (dataset_type == "val") & (self.earlystopping.patience > 0):
            early_stop_flag = self.earlystopping(metrics_output, global_epoch)
            if early_stop_flag:
                # Map the best epoch to the nearest checkpointed epoch so that
                # a saved model actually exists for it.
                best_epoch = self.earlystopping.best_epoch
                closest_epoch = round(best_epoch / self.common_config.save_frequency) * \
                    self.common_config.save_frequency
                closest_epoch -= self.common_config.save_frequency \
                    if closest_epoch > global_epoch else 0
                self.context["early_stop_flag"] = True
                self.context["early_stop_epoch"] = closest_epoch
| 11,572 | 34.069697 | 89 | py |
XFL | XFL-master/python/algorithm/framework/horizontal/densenet/label_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from algorithm.core.horizontal.template.agg_type import register_agg_type_for_label_trainer
from common.utils.logger import logger
from .common import Common
from functools import partial
class HorizontalDensenetLabelTrainer(Common):
    """Label-trainer side of horizontal DenseNet federated training."""

    def __init__(self, train_conf: dict):
        super().__init__(train_conf)
        # The aggregation method name is the single key of the configured mapping.
        agg_method = next(iter(self.common_config.aggregation["method"]))
        self.register_hook(
            place="after_train_loop", rank=1,
            func=partial(self.val_loop, "train"), desc="validation on trainset"
        )
        register_agg_type_for_label_trainer(self, 'torch', agg_method)

    def train_loop(self):
        """Run one local training epoch and store the mean batch loss in context."""
        self.model.train()
        lossfunc = next(iter(self.lossfunc.values()))
        optimizer = next(iter(self.optimizer.values()))
        schedulers = list(self.lr_scheduler.values())
        lr_scheduler = schedulers[0] if schedulers else None
        total_loss = 0.0
        for feature, label in self.train_dataloader:
            pred = self.model(feature)
            loss = lossfunc(pred, label)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            total_loss += loss.item()
        train_loss = total_loss / len(self.train_dataloader)
        # Advance the learning-rate schedule once per local epoch.
        if lr_scheduler:
            lr_scheduler.step()
        self.context["train_loss"] = train_loss
        logger.info(f"Train loss: {train_loss}")
| 2,017 | 37.807692 | 98 | py |
XFL | XFL-master/python/algorithm/framework/horizontal/densenet/assist_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from .common import Common
from algorithm.core.horizontal.template.agg_type import register_agg_type_for_assist_trainer
class HorizontalDensenetAssistTrainer(Common):
    """Assist-trainer (aggregator) side of horizontal DenseNet training."""

    def __init__(self, train_conf: dict):
        super().__init__(train_conf)
        agg_method = next(iter(self.common_config.aggregation["method"]))
        register_agg_type_for_assist_trainer(self, 'torch', agg_method)
        # Hook ranks order the work after each epoch: checkpoint, then validate.
        hooks = (
            ("after_local_epoch", 1, partial(self._save_model, False), "save model "),
            ("after_local_epoch", 2, partial(self.val_loop, "val"), "validation on valset"),
            ("after_global_epoch", 1, partial(self._save_model, True), "save final model"),
        )
        for place, rank, func, desc in hooks:
            self.register_hook(place=place, rank=rank, func=func, desc=desc)

    def train_loop(self):
        # The assist trainer only aggregates; it performs no local training.
        pass
| 1,537 | 42.942857 | 92 | py |
XFL | XFL-master/python/algorithm/framework/horizontal/densenet/common.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset
from PIL import Image
import torchvision.transforms as transforms
from algorithm.core.data_io import CsvReader, NpzReader
from algorithm.model.densenet import DenseNet
from common.utils.logger import logger
from algorithm.core.horizontal.template.torch.base import BaseTrainer
from common.utils.config_sync import ConfigSynchronizer
from common.checker.x_types import All
from common.evaluation.metrics import CommonMetrics
class Common(BaseTrainer):
    """Shared logic for the horizontal DenseNet label/assist trainers:
    config synchronization, model construction, CIFAR-style dataloaders,
    and the evaluation loop with metric logging and early stopping."""

    def __init__(self, train_conf: dict) -> None:
        """Synchronize the listed config sections across parties, then run
        the generic BaseTrainer initialization.

        Args:
            train_conf: local train configuration; synchronized fields are
                overwritten by the agreed values.
        """
        # Only these sections are forced to be identical on all parties.
        sync_rule = {
            "model_info": All(),
            "train_info": {
                "interaction_params": All(),
                "train_params": {
                    "global_epoch": All(),
                    "aggregation": All(),
                    "encryption": All(),
                    "optimizer": All(),
                    "lr_scheduler": All(),
                    "lossfunc": All(),
                    "metric": All(),
                    "early_stopping": All()
                }
            }
        }
        train_conf = ConfigSynchronizer(train_conf).sync(sync_rule)
        super().__init__(train_conf)

    def _set_model(self) -> nn.Module:
        """Build the DenseNet model and move it to the configured device."""
        model_config = self.common_config.model_info.get("config")
        model = DenseNet(num_classes=model_config["num_classes"],
                         layers=model_config["layers"])
        model = model.to(self.device)
        return model

    def _read_data(self, input_dataset):
        """Return a reader for the first dataset config entry, or None.

        Supports csv (with optional id/label columns) and npz files.
        """
        if not input_dataset:
            return None
        conf = input_dataset[0]
        if conf["type"] == "csv":
            path = os.path.join(conf['path'], conf['name'])
            has_label = conf["has_label"]
            has_id = conf['has_id']
            return CsvReader(path, has_id, has_label)
        elif conf["type"] == "npz":
            path = os.path.join(conf['path'], conf['name'])
            return NpzReader(path)
        else:
            return None

    def _set_train_dataloader(self):
        """Build the training DataLoader with train-time augmentation."""
        def img_collate_fn(batch):
            # Convert each raw uint8 image to PIL, augment, and stack.
            labels = []
            imgs = []
            transform_train = transforms.Compose([
                transforms.RandomCrop(32, padding=4),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])
            for feature, label in batch:
                img = Image.fromarray(feature.numpy().astype(np.uint8))
                imgs.append(transform_train(img))
                labels.append(label)
            return torch.stack(imgs, 0).to(self.device), torch.stack(labels, 0).long().to(self.device)

        train_data = self._read_data(self.common_config.input_trainset)
        train_dataloader = None
        if train_data:
            trainset = TensorDataset(
                torch.tensor(train_data.features()), torch.tensor(train_data.label())
            )
            batch_size = self.common_config.train_params.get("train_batch_size")
            train_dataloader = DataLoader(
                trainset, batch_size, shuffle=True, collate_fn=img_collate_fn
            )
        return train_dataloader

    def _set_val_dataloader(self):
        """Build the validation DataLoader (normalization only, no augmentation)."""
        def img_collate_fn(batch):
            labels = []
            imgs = []
            transform_test = transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])
            for feature, label in batch:
                img = Image.fromarray(feature.numpy().astype(np.uint8))
                imgs.append(transform_test(img))
                labels.append(label)
            return torch.stack(imgs, 0).to(self.device), torch.stack(labels, 0).long().to(self.device)

        val_data = self._read_data(self.common_config.input_valset)
        val_dataloader = None
        if val_data:
            valset = TensorDataset(
                torch.tensor(val_data.features()), torch.tensor(val_data.label())
            )
            batch_size = self.common_config.train_params.get("val_batch_size")
            val_dataloader = DataLoader(
                valset, batch_size, shuffle=True, collate_fn=img_collate_fn
            )
        return val_dataloader

    def val_loop(self, dataset_type: str = "val", context: dict = {}):
        """Score one dataset, save metrics to csv, and update early stopping.

        Args:
            dataset_type: "val" or "train", selecting which loader to score.
            context: unused; kept for hook-signature compatibility.

        Raises:
            ValueError: if dataset_type is neither "val" nor "train".
        """
        self.model.eval()
        val_loss = 0
        val_predicts = []
        labels = []
        lossfunc_name = list(self.lossfunc.keys())[0]
        lossfunc = list(self.lossfunc.values())[0]
        if dataset_type == "val":
            dataloader = self.val_dataloader
        elif dataset_type == "train":
            dataloader = self.train_dataloader
        else:
            raise ValueError(f"dataset type {dataset_type} is not valid.")
        # Inference only: disable autograd bookkeeping to save memory/time.
        with torch.no_grad():
            for feature, label in dataloader:
                pred = self.model(feature)
                loss = lossfunc(pred, label)
                val_predicts.append(pred.cpu().squeeze(-1).numpy())
                val_loss += loss.item()
                labels.append(label.cpu().squeeze(-1).numpy())
        val_loss /= len(dataloader)
        labels: np.ndarray = np.concatenate(labels, axis=0)
        val_predicts: np.ndarray = np.concatenate(val_predicts, axis=0)
        # 1-D scores: binary threshold at 0.5; 2-D logits: argmax over classes.
        if len(val_predicts.shape) == 1:
            val_predicts = np.array(val_predicts > 0.5, dtype=np.int32)
        elif len(val_predicts.shape) == 2:
            val_predicts = val_predicts.argmax(axis=-1)
        metrics_output = CommonMetrics._calc_metrics(
            metrics=self.metrics,
            labels=labels,
            val_predicts=val_predicts,
            lossfunc_name=lossfunc_name,
            loss=val_loss,
            dataset_type=dataset_type
        )
        global_epoch = self.context["g_epoch"]
        # The local epoch only applies when scoring the train split.
        local_epoch = self.context["l_epoch"] if dataset_type == "train" else None
        CommonMetrics.save_metric_csv(
            metrics_output=metrics_output,
            output_config=self.common_config.output,
            global_epoch=global_epoch,
            local_epoch=local_epoch,
            dataset_type=dataset_type,
        )
        early_stop_flag = self.context["early_stop_flag"]
        # Use logical `and` (not bitwise `&`) so the checks short-circuit.
        if self.common_config.save_frequency > 0 and \
                dataset_type == "val" and self.earlystopping.patience > 0:
            early_stop_flag = self.earlystopping(metrics_output, global_epoch)
        if early_stop_flag:
            # find the saved epoch closest to the best epoch
            best_epoch = self.earlystopping.best_epoch
            closest_epoch = round(best_epoch / self.common_config.save_frequency) * \
                self.common_config.save_frequency
            closest_epoch -= self.common_config.save_frequency \
                if closest_epoch > global_epoch else 0
            self.context["early_stop_flag"] = True
            self.context["early_stop_epoch"] = closest_epoch
| 7,943 | 37.75122 | 101 | py |
XFL | XFL-master/python/algorithm/framework/horizontal/logistic_regression/label_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from algorithm.core.horizontal.template.agg_type import register_agg_type_for_label_trainer
from common.utils.logger import logger
from .common import Common
class HorizontalLogisticRegressionLabelTrainer(Common):
    """Label-side trainer for horizontal logistic regression."""

    def __init__(self, train_conf: dict):
        super().__init__(train_conf)
        # The configured aggregation method decides which template is wired in.
        method = list(self.common_config.aggregation["method"].keys())[0]
        register_agg_type_for_label_trainer(self, 'torch', method)
        self.register_hook(
            place="after_train_loop", rank=1,
            func=partial(self.val_loop, "train"), desc="validation on trainset"
        )

    def train_loop(self):
        """Run one local epoch and record the mean batch loss in the context."""
        self.model.train()
        criterion = next(iter(self.lossfunc.values()))
        opt = next(iter(self.optimizer.values()))
        schedulers = list(self.lr_scheduler.values())
        scheduler = schedulers[0] if schedulers else None
        running_loss = 0.0
        for feature, label in self.train_dataloader:
            pred = self.model(feature)
            loss = criterion(pred, label)
            opt.zero_grad()
            loss.backward()
            opt.step()
            running_loss += loss.item()
        train_loss = running_loss / len(self.train_dataloader)
        if scheduler:
            scheduler.step()
        self.context["train_loss"] = train_loss
        logger.info(f"Train loss: {train_loss}")
| 2,042 | 36.145455 | 98 | py |
XFL | XFL-master/python/algorithm/framework/horizontal/logistic_regression/assist_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from .common import Common
from algorithm.core.horizontal.template.agg_type import register_agg_type_for_assist_trainer
class HorizontalLogisticRegressionAssistTrainer(Common):
    """Assist-side (aggregator) trainer for horizontal logistic regression."""

    def __init__(self, train_conf: dict):
        super().__init__(train_conf)
        method = list(self.common_config.aggregation["method"].keys())[0]
        register_agg_type_for_assist_trainer(self, 'torch', method)
        # Table-driven hook registration: (place, rank, callback, description).
        hook_table = (
            ("after_local_epoch", 1, partial(self._save_model, False), "save model "),
            ("after_local_epoch", 2, partial(self.val_loop, "val"), "validation on valset"),
            ("after_global_epoch", 1, partial(self._save_model, True), "save final model"),
        )
        for place, rank, func, desc in hook_table:
            self.register_hook(place=place, rank=rank, func=func, desc=desc)

    def train_loop(self):
        # The assist trainer does not train locally; it only aggregates.
        pass
| 1,543 | 43.114286 | 92 | py |
XFL | XFL-master/python/algorithm/framework/horizontal/logistic_regression/common.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset
from algorithm.core.data_io import CsvReader
from algorithm.core.horizontal.template.torch.base import BaseTrainer
from algorithm.model.logistic_regression import LogisticRegression
from common.checker.x_types import All
from common.evaluation.metrics import CommonMetrics
from common.utils.config_sync import ConfigSynchronizer
class Common(BaseTrainer):
    """Shared logic for the horizontal logistic-regression trainers:
    config synchronization, model and dataloader construction, and the
    evaluation loop with metric logging and early stopping."""

    def __init__(self, train_conf: dict):
        """Synchronize the listed config sections across parties, then run
        the generic BaseTrainer initialization.

        Args:
            train_conf: local train configuration; synchronized fields are
                overwritten by the agreed values.
        """
        sync_rule = {
            "model_info": {
                "config": All()
            },
            "train_info": {
                "interaction_params": All(),
                "train_params": {
                    "global_epoch": All(),
                    "aggregation": All(),
                    "encryption": All(),
                    "optimizer": All(),
                    "lr_scheduler": All(),
                    "lossfunc": All(),
                    "metric": All(),
                    "early_stopping": All()
                }
            }
        }
        train_conf = ConfigSynchronizer(train_conf).sync(sync_rule)
        super().__init__(train_conf)

    def _set_model(self) -> nn.Module:
        """Build the logistic-regression model on the configured device."""
        model_config = self.common_config.model_info.get("config")
        model = LogisticRegression(input_dim=model_config["input_dim"],
                                   bias=model_config["bias"])
        # Keep the model on the same device as the dataloader tensors
        # (the densenet/resnet Common classes already do this; without it,
        # a CUDA device setting caused a model/data device mismatch).
        model = model.to(self.device)
        return model

    def _read_data(self, input_dataset):
        """Return a CsvReader for the first dataset config entry, or None."""
        if not input_dataset:
            return None
        conf = input_dataset[0]
        if conf["type"] == "csv":
            path = os.path.join(conf['path'], conf['name'])
            has_label = conf["has_label"]
            has_id = conf['has_id']
            return CsvReader(path, has_id, has_label)
        else:
            return None

    def _set_train_dataloader(self):
        """Build the training DataLoader of (feature, label) float tensors."""
        train_data = self._read_data(self.common_config.input_trainset)
        train_dataloader = None
        if train_data:
            trainset = TensorDataset(
                torch.tensor(train_data.features(),
                             dtype=torch.float32).to(self.device),
                torch.tensor(train_data.label(), dtype=torch.float32).unsqueeze(
                    dim=-1).to(self.device)
            )
            batch_size = self.common_config.train_params.get("train_batch_size")
            train_dataloader = DataLoader(trainset, batch_size, shuffle=True)
        return train_dataloader

    def _set_val_dataloader(self):
        """Build the validation DataLoader of (feature, label) float tensors."""
        val_data = self._read_data(self.common_config.input_valset)
        val_dataloader = None
        if val_data:
            valset = TensorDataset(
                torch.tensor(val_data.features(),
                             dtype=torch.float32).to(self.device),
                torch.tensor(val_data.label(), dtype=torch.float32).unsqueeze(
                    dim=-1).to(self.device)
            )
            batch_size = self.common_config.train_params.get("val_batch_size")
            val_dataloader = DataLoader(valset, batch_size, shuffle=True)
        return val_dataloader

    def val_loop(self, dataset_type: str = "val", context: dict = {}):
        """Score one dataset, save metrics to csv, and update early stopping.

        Args:
            dataset_type: "val" or "train", selecting which loader to score.
            context: unused; kept for hook-signature compatibility.

        Raises:
            ValueError: if dataset_type is neither "val" nor "train".
        """
        self.model.eval()
        val_loss = 0
        val_predicts = []
        labels = []
        lossfunc_name = list(self.lossfunc.keys())[0]
        lossfunc = list(self.lossfunc.values())[0]
        if dataset_type == "val":
            dataloader = self.val_dataloader
        elif dataset_type == "train":
            dataloader = self.train_dataloader
        else:
            raise ValueError(f"dataset type {dataset_type} is not valid.")
        # Inference only: disable autograd bookkeeping to save memory/time.
        with torch.no_grad():
            for feature, label in dataloader:
                pred = self.model(feature)
                loss = lossfunc(pred, label)
                val_predicts.append(pred.cpu().squeeze(-1).numpy())
                val_loss += loss.item()
                labels.append(label.cpu().squeeze(-1).numpy())
        val_loss /= len(dataloader)
        labels: np.ndarray = np.concatenate(labels, axis=0)
        val_predicts: np.ndarray = np.concatenate(val_predicts, axis=0)
        # 1-D scores: binary threshold at 0.5; 2-D logits: argmax over classes.
        if len(val_predicts.shape) == 1:
            val_predicts = np.array(val_predicts > 0.5, dtype=np.int32)
        elif len(val_predicts.shape) == 2:
            val_predicts = val_predicts.argmax(axis=-1)
        metrics_output = CommonMetrics._calc_metrics(
            metrics=self.metrics,
            labels=labels,
            val_predicts=val_predicts,
            lossfunc_name=lossfunc_name,
            loss=val_loss,
            dataset_type=dataset_type
        )
        global_epoch = self.context["g_epoch"]
        # The local epoch only applies when scoring the train split.
        local_epoch = self.context["l_epoch"] if dataset_type == "train" else None
        CommonMetrics.save_metric_csv(
            metrics_output=metrics_output,
            output_config=self.common_config.output,
            global_epoch=global_epoch,
            local_epoch=local_epoch,
            dataset_type=dataset_type,
        )
        early_stop_flag = self.context["early_stop_flag"]
        # Use logical `and` (not bitwise `&`) so the checks short-circuit.
        if self.common_config.save_frequency > 0 and \
                dataset_type == "val" and self.earlystopping.patience > 0:
            early_stop_flag = self.earlystopping(metrics_output, global_epoch)
        if early_stop_flag:
            # find the saved epoch closest to the best epoch
            best_epoch = self.earlystopping.best_epoch
            closest_epoch = round(best_epoch / self.common_config.save_frequency) * \
                self.common_config.save_frequency
            closest_epoch -= self.common_config.save_frequency \
                if closest_epoch > global_epoch else 0
            self.context["early_stop_flag"] = True
            self.context["early_stop_epoch"] = closest_epoch
| 6,601 | 36.089888 | 89 | py |
XFL | XFL-master/python/algorithm/framework/horizontal/logistic_regression/__init__.py | 0 | 0 | 0 | py | |
XFL | XFL-master/python/algorithm/framework/horizontal/resnet/label_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from algorithm.core.horizontal.template.agg_type import register_agg_type_for_label_trainer
from common.utils.logger import logger
from .common import Common
from functools import partial
class HorizontalResnetLabelTrainer(Common):
    """Label-side trainer for horizontal ResNet."""

    def __init__(self, train_conf: dict):
        super().__init__(train_conf)
        method = list(self.common_config.aggregation["method"].keys())[0]
        # Hook first, then aggregation wiring (preserves original ordering).
        self.register_hook(
            place="after_train_loop", rank=1,
            func=partial(self.val_loop, "train"), desc="validation on trainset"
        )
        register_agg_type_for_label_trainer(self, 'torch', method)

    def train_loop(self):
        """Run one local epoch and record the mean batch loss in the context."""
        self.model.train()
        criterion = next(iter(self.lossfunc.values()))
        opt = next(iter(self.optimizer.values()))
        schedulers = list(self.lr_scheduler.values())
        scheduler = schedulers[0] if schedulers else None
        running_loss = 0.0
        for feature, label in self.train_dataloader:
            pred = self.model(feature)
            loss = criterion(pred, label)
            opt.zero_grad()
            loss.backward()
            opt.step()
            running_loss += loss.item()
        train_loss = running_loss / len(self.train_dataloader)
        if scheduler:
            scheduler.step()
        self.context["train_loss"] = train_loss
        logger.info(f"Train loss: {train_loss}")
| 2,015 | 37.769231 | 98 | py |
XFL | XFL-master/python/algorithm/framework/horizontal/resnet/assist_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from .common import Common
from algorithm.core.horizontal.template.agg_type import register_agg_type_for_assist_trainer
class HorizontalResnetAssistTrainer(Common):
    """Assist-side (aggregator) trainer for horizontal ResNet."""

    def __init__(self, train_conf: dict):
        super().__init__(train_conf)
        method = list(self.common_config.aggregation["method"].keys())[0]
        register_agg_type_for_assist_trainer(self, 'torch', method)
        # Table-driven hook registration: (place, rank, callback, description).
        hook_table = (
            ("after_local_epoch", 1, partial(self._save_model, False), "save model "),
            ("after_local_epoch", 2, partial(self.val_loop, "val"), "validation on valset"),
            ("after_global_epoch", 1, partial(self._save_model, True), "save final model"),
        )
        for place, rank, func, desc in hook_table:
            self.register_hook(place=place, rank=rank, func=func, desc=desc)

    def train_loop(self):
        # The assist trainer does not train locally; it only aggregates.
        pass
| 1,531 | 42.771429 | 92 | py |
XFL | XFL-master/python/algorithm/framework/horizontal/resnet/common.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset
from PIL import Image
import torchvision.transforms as transforms
from algorithm.core.data_io import CsvReader, NpzReader
from algorithm.model.resnet import ResNet
from common.utils.logger import logger
from algorithm.core.horizontal.template.torch.base import BaseTrainer
from common.utils.config_sync import ConfigSynchronizer
from common.checker.x_types import All
from common.evaluation.metrics import CommonMetrics
class Common(BaseTrainer):
    """Shared logic for the horizontal ResNet label/assist trainers:
    config synchronization, model construction, CIFAR-style dataloaders,
    and the evaluation loop with metric logging and early stopping."""

    def __init__(self, train_conf: dict) -> None:
        """Synchronize the listed config sections across parties, then run
        the generic BaseTrainer initialization.

        Args:
            train_conf: local train configuration; synchronized fields are
                overwritten by the agreed values.
        """
        sync_rule = {
            "model_info": All(),
            "train_info": {
                "interaction_params": All(),
                "train_params": {
                    "global_epoch": All(),
                    "aggregation": All(),
                    "encryption": All(),
                    "optimizer": All(),
                    "lr_scheduler": All(),
                    "lossfunc": All(),
                    "metric": All(),
                    "early_stopping": All()
                }
            }
        }
        train_conf = ConfigSynchronizer(train_conf).sync(sync_rule)
        super().__init__(train_conf)

    def _set_model(self) -> nn.Module:
        """Build the ResNet model and move it to the configured device."""
        model_config = self.common_config.model_info.get("config")
        model = ResNet(num_classes=model_config["num_classes"],
                       layers=model_config["layers"])
        model = model.to(self.device)
        return model

    def _read_data(self, input_dataset):
        """Return a reader for the first dataset config entry, or None.

        Supports csv (with optional id/label columns) and npz files.
        """
        if not input_dataset:
            return None
        conf = input_dataset[0]
        if conf["type"] == "csv":
            path = os.path.join(conf['path'], conf['name'])
            has_label = conf["has_label"]
            has_id = conf['has_id']
            return CsvReader(path, has_id, has_label)
        elif conf["type"] == "npz":
            path = os.path.join(conf['path'], conf['name'])
            return NpzReader(path)
        else:
            return None

    def _set_train_dataloader(self):
        """Build the training DataLoader with train-time augmentation."""
        def img_collate_fn(batch):
            # Convert each raw uint8 image to PIL, augment, and stack.
            labels = []
            imgs = []
            transform_train = transforms.Compose([
                transforms.RandomCrop(32, padding=4),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])
            for feature, label in batch:
                img = Image.fromarray(feature.numpy().astype(np.uint8))
                imgs.append(transform_train(img))
                labels.append(label)
            return torch.stack(imgs, 0).to(self.device), torch.stack(labels, 0).long().to(self.device)

        train_data = self._read_data(self.common_config.input_trainset)
        train_dataloader = None
        if train_data:
            trainset = TensorDataset(
                torch.tensor(train_data.features()), torch.tensor(train_data.label())
            )
            batch_size = self.common_config.train_params.get("train_batch_size")
            train_dataloader = DataLoader(
                trainset, batch_size, shuffle=True, collate_fn=img_collate_fn
            )
        return train_dataloader

    def _set_val_dataloader(self):
        """Build the validation DataLoader (normalization only, no augmentation)."""
        def img_collate_fn(batch):
            labels = []
            imgs = []
            transform_test = transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])
            for feature, label in batch:
                img = Image.fromarray(feature.numpy().astype(np.uint8))
                imgs.append(transform_test(img))
                labels.append(label)
            return torch.stack(imgs, 0).to(self.device), torch.stack(labels, 0).long().to(self.device)

        val_data = self._read_data(self.common_config.input_valset)
        val_dataloader = None
        if val_data:
            valset = TensorDataset(
                torch.tensor(val_data.features()), torch.tensor(val_data.label())
            )
            batch_size = self.common_config.train_params.get("val_batch_size")
            val_dataloader = DataLoader(
                valset, batch_size, shuffle=True, collate_fn=img_collate_fn
            )
        return val_dataloader

    def val_loop(self, dataset_type: str = "val", context: dict = {}):
        """Score one dataset, save metrics to csv, and update early stopping.

        Args:
            dataset_type: "val" or "train", selecting which loader to score.
            context: unused; kept for hook-signature compatibility.

        Raises:
            ValueError: if dataset_type is neither "val" nor "train".
        """
        self.model.eval()
        val_loss = 0
        val_predicts = []
        labels = []
        lossfunc_name = list(self.lossfunc.keys())[0]
        lossfunc = list(self.lossfunc.values())[0]
        if dataset_type == "val":
            dataloader = self.val_dataloader
        elif dataset_type == "train":
            dataloader = self.train_dataloader
        else:
            raise ValueError(f"dataset type {dataset_type} is not valid.")
        # Inference only: disable autograd bookkeeping to save memory/time.
        with torch.no_grad():
            for feature, label in dataloader:
                pred = self.model(feature)
                loss = lossfunc(pred, label)
                val_predicts.append(pred.cpu().squeeze(-1).numpy())
                val_loss += loss.item()
                labels.append(label.cpu().squeeze(-1).numpy())
        val_loss /= len(dataloader)
        labels: np.ndarray = np.concatenate(labels, axis=0)
        val_predicts: np.ndarray = np.concatenate(val_predicts, axis=0)
        # 1-D scores: binary threshold at 0.5; 2-D logits: argmax over classes.
        if len(val_predicts.shape) == 1:
            val_predicts = np.array(val_predicts > 0.5, dtype=np.int32)
        elif len(val_predicts.shape) == 2:
            val_predicts = val_predicts.argmax(axis=-1)
        metrics_output = CommonMetrics._calc_metrics(
            metrics=self.metrics,
            labels=labels,
            val_predicts=val_predicts,
            lossfunc_name=lossfunc_name,
            loss=val_loss,
            dataset_type=dataset_type
        )
        global_epoch = self.context["g_epoch"]
        # The local epoch only applies when scoring the train split.
        local_epoch = self.context["l_epoch"] if dataset_type == "train" else None
        CommonMetrics.save_metric_csv(
            metrics_output=metrics_output,
            output_config=self.common_config.output,
            global_epoch=global_epoch,
            local_epoch=local_epoch,
            dataset_type=dataset_type,
        )
        early_stop_flag = self.context["early_stop_flag"]
        # Use logical `and` (not bitwise `&`) so the checks short-circuit.
        if self.common_config.save_frequency > 0 and \
                dataset_type == "val" and self.earlystopping.patience > 0:
            early_stop_flag = self.earlystopping(metrics_output, global_epoch)
        if early_stop_flag:
            # find the saved epoch closest to the best epoch
            best_epoch = self.earlystopping.best_epoch
            closest_epoch = round(best_epoch / self.common_config.save_frequency) * \
                self.common_config.save_frequency
            closest_epoch -= self.common_config.save_frequency \
                if closest_epoch > global_epoch else 0
            self.context["early_stop_flag"] = True
            self.context["early_stop_epoch"] = closest_epoch
| 8,036 | 37.826087 | 101 | py |
XFL | XFL-master/python/algorithm/framework/transfer/base.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
from typing import OrderedDict
from functools import partial
from common.utils.config_parser import CommonConfigParser
from algorithm.core.loss.torch_loss import get_lossfunc
from algorithm.core.metrics import get_metric
from algorithm.core.optimizer.torch_optimizer import get_optimizer
from algorithm.core.lr_scheduler.torch_lr_scheduler import get_lr_scheduler
class BaseTrainer:
    """Common scaffolding for transfer trainers: parses the config and
    builds the model, dataloaders, optimizers, lr schedulers, loss
    functions, and metric callables. Subclasses supply the model and
    dataloader construction."""

    def __init__(self, train_conf: dict):
        self.common_config = CommonConfigParser(train_conf)
        if self.common_config.random_seed is not None:
            self.set_seed(self.common_config.random_seed)
        self.device = self.common_config.device
        self._parse_config()
        self.model = self._set_model()
        self._set_train_dataloader()
        self._set_val_dataloader()
        # Build each component exactly once. (The optimizer/lr_scheduler
        # pair used to be constructed twice, discarding the first pair.)
        self.optimizer = self._set_optimizer()
        self.lr_scheduler = self._set_lr_scheduler(self.optimizer)
        self.lossfunc = self._set_lossfunc()
        self.metrics = self._set_metrics()

    @staticmethod
    def set_seed(seed):
        """Seed torch's CPU and CUDA RNGs for reproducibility."""
        torch.manual_seed(seed)
        torch.cuda.manual_seed(seed)

    def _parse_config(self) -> None:
        """Parse trainer-specific config entries; subclasses must override."""
        raise NotImplementedError("The _parse_config method is not implemented.")

    def _set_optimizer(self):
        """Build one optimizer per entry in the optimizer config.

        Returns:
            OrderedDict mapping optimizer name -> constructed optimizer,
            each bound to the model's parameters.
        """
        optimizer_conf = OrderedDict(self.common_config.optimizer)
        optimizer = OrderedDict()
        for k, v in optimizer_conf.items():
            optimizer[k] = get_optimizer(k)(self.model.parameters(), **v)
        return optimizer

    def _set_lossfunc(self):
        """Build one loss function per entry in the lossfunc config."""
        lossfunc_conf = OrderedDict(self.common_config.lossfunc)
        lossfunc = OrderedDict()
        for k, v in lossfunc_conf.items():
            lossfunc[k] = get_lossfunc(k)(**v)
        return lossfunc

    def _set_lr_scheduler(self, optimizer: OrderedDict):
        """Pair each configured scheduler with its optimizer, in order.

        Args:
            optimizer: the OrderedDict returned by _set_optimizer; schedulers
                and optimizers are matched positionally via zip.
        """
        lr_scheduler_conf = OrderedDict(self.common_config.lr_scheduler)
        lr_scheduler = OrderedDict()
        for (k, v), o in zip(lr_scheduler_conf.items(), optimizer.values()):
            lr_scheduler[k] = get_lr_scheduler(k)(o, **v)
        return lr_scheduler

    def _set_metrics(self):
        """Bind each configured metric name to a ready-to-call function."""
        metrics = {}
        metrics_conf: dict = self.common_config.metric
        for k, v in metrics_conf.items():
            metric = get_metric(k)
            metrics[k] = partial(metric, **v)
        return metrics

    def _set_model(self) -> nn.Module:
        """Build the model; subclasses must override."""
        raise NotImplementedError("The _set_model method is not implemented.")

    def _set_train_dataloader(self):
        """Build the train dataloader; subclasses must override."""
        raise NotImplementedError(
            "The _set_train_dataloader method is not implemented.")

    def _set_val_dataloader(self):
        """Build the validation dataloader; subclasses must override."""
        raise NotImplementedError(
            "The _set_val_dataloader method is not implemented.")
| 3,594 | 34.95 | 81 | py |
XFL | XFL-master/python/algorithm/framework/transfer/transfer_model_base.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from functools import partial
from common.utils.config_parser import TrainConfigParser
from algorithm.core.metrics import get_metric
class TransferModelBase(TrainConfigParser):
    """Config-parsing base class shared by transfer models."""

    def __init__(self, train_conf: dict, label: bool = False):
        super().__init__(train_conf)
        self.train_conf = train_conf
        self.model_conf = train_conf["model_info"].get("config")
        self.label = label

    def _parse_config(self) -> None:
        """Materialize frequently used config entries as attributes."""
        out_conf = self.output
        params = self.train_params
        conf = self.model_conf
        self.save_dir = Path(out_conf.get("path", ""))
        self.metric_dir = self.save_dir
        # interaction_params
        self.model_name = self.model_info.get("name")
        self.save_model_name = out_conf.get("model", {}).get("name", {})
        self.pretrain_model_path = self.input.get("pretrained_model", {}).get("path")
        self.num_features = conf.get("num_features")
        self.hidden_features = conf.get("hidden_features")
        self.constant_k = 1 / self.hidden_features
        self.alpha = conf.get("alpha")
        self.global_epoch = params.get("global_epoch")
        self.local_epoch = params.get("local_epoch")
        self.batch_size = params.get("batch_size")
        self.shuffle_seed = params.get("shuffle_seed")
        self.random_seed = params.get("random_seed")

    @staticmethod
    def set_seed(seed):
        """Seed torch's CPU and CUDA generators."""
        torch.manual_seed(seed)
        torch.cuda.manual_seed(seed)

    def _set_metrics(self):
        """Bind each configured metric name to a ready-to-call function."""
        metrics_conf: dict = self.train_params.get("metric", {})
        return {name: partial(get_metric(name), **kwargs)
                for name, kwargs in metrics_conf.items()}
| 2,407 | 36.046154 | 85 | py |
XFL | XFL-master/python/algorithm/framework/transfer/__init__.py | 0 | 0 | 0 | py | |
XFL | XFL-master/python/algorithm/framework/transfer/logistic_regression/base.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pandas as pd
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset
from algorithm.framework.transfer.transfer_model_base import TransferModelBase
from common.utils.logger import logger
from common.utils.model_preserver import ModelPreserver
class TransferLogisticRegression(nn.Module):
    """Single linear projection used as the transfer-LR feature model.

    Args:
        input_dim: number of input features.
        output_dim: size of the projected representation.
        bias: whether the linear layer carries a bias term.
    """

    def __init__(self, input_dim: int, output_dim: int, bias: bool = False):
        super().__init__()
        self.linear = nn.Linear(input_dim, output_dim, bias=bias)

    def forward(self, x):
        # Pure affine map; no activation is applied here.
        projected = self.linear(x)
        return projected
class TransferLogisticRegressionBase(TransferModelBase):
    """Base class for the transfer logistic-regression trainers.

    Handles model initialization (optionally from a pretrained checkpoint)
    and builds the overlap / non-overlap train loaders plus the validation
    loader.
    """

    def __init__(self, train_conf: dict, label: bool = False, *args, **kwargs):
        """Parse config and build the dataloaders.

        Args:
            train_conf: train configuration dict.
            label: True for the label-holding party, False otherwise.
        """
        super().__init__(train_conf)
        self._parse_config()
        self.label = label
        self.model = None
        self.phi = None  # phi will be saved in the checkpoint of label_trainer
        self.overlap_y, self.non_overlap_y = None, None
        self.overlap_train_dataloader, self.non_overlap_train_dataloader = None, None
        self.eval_dataloader = None
        self.metric_functions = {}
        self._set_train_dataloader()
        self._set_val_dataloader()

    def _init_model(self, bias: bool = False) -> None:
        """Init the logistic regression model, optionally loading pretrained
        weights (and phi, if present) from a checkpoint.

        Returns: None
        """
        logger.info("Init model start.")
        self.model = TransferLogisticRegression(
            input_dim=self.num_features, output_dim=self.hidden_features, bias=bias
        )
        # Load pretrained model if needed (path may be None or empty).
        if self.pretrain_model_path:
            checkpoint = ModelPreserver.load(
                os.path.join(self.pretrain_model_path, self.input.get("pretrained_model").get("name")))
            state_dict = checkpoint["state_dict"]
            # phi is stored alongside the weights but is not a model parameter.
            if "phi" in state_dict.keys():
                self.phi = state_dict.pop("phi")
            self.model.load_state_dict(state_dict)
        self.model = self.model.to(self.device)
        logger.info("Init model completed.")

    def _read_data(self, input_dataset, is_train=True):
        """Load the first configured csv dataset.

        Returns:
            (DataFrame, overlap_index ndarray) when is_train is True,
            otherwise just the DataFrame; None if input_dataset is empty.

        Raises:
            NotImplementedError: for dataset types other than csv.
        """
        if len(input_dataset) == 0:
            return None
        conf = input_dataset[0]
        if conf["type"] == "csv":
            path = os.path.join(conf['path'], conf['name'])
            has_id = conf['has_id']
            index_col = 0 if has_id else False
            train_data = pd.read_csv(path, index_col=index_col)
            if is_train:
                # The overlap sample ids are precomputed and stored next to
                # the csv file.
                index_name = "overlap_index.npy"
                index_path = os.path.join(conf['path'], index_name)
                overlap_index = np.load(index_path)
                return train_data, overlap_index
            else:
                return train_data
        else:
            raise NotImplementedError(
                "Dataset load method {} does not Implemented.".format(conf["type"])
            )

    def _set_train_dataloader(self):
        """Build the overlap (and, on the label side, non-overlap) loaders."""
        train_data, overlap_index = self._read_data(self.input_trainset)
        self.sample_num = train_data.shape[0]
        overlap_train_data = train_data.loc[overlap_index]
        if self.label:
            # Vectorized complement of the overlap index. The previous
            # np.append loop was O(n^2) and, starting from np.array([]),
            # produced a float64 index that could break .loc on int indices.
            non_overlap_mask = ~train_data.index.isin(overlap_index)
            non_overlap_train_data = train_data.loc[non_overlap_mask]
            # init overlap_y and non_overlap_y (first column is the label)
            self.overlap_y = torch.tensor(overlap_train_data.iloc[:, 0].to_numpy(), dtype=torch.float32).unsqueeze(1)
            self.non_overlap_y = torch.tensor(non_overlap_train_data.iloc[:, 0].to_numpy(), dtype=torch.float32).unsqueeze(1)
            # init train_dataloader
            overlap_x = torch.tensor(overlap_train_data.iloc[:, 1:].to_numpy(), dtype=torch.float32)
            overlap_trainset = TensorDataset(overlap_x, self.overlap_y)
            self.overlap_train_dataloader = DataLoader(overlap_trainset, batch_size=self.batch_size, shuffle=False)
            non_overlap_x = torch.tensor(non_overlap_train_data.iloc[:, 1:].to_numpy(), dtype=torch.float32)
            non_overlap_trainset = TensorDataset(non_overlap_x, self.non_overlap_y)
            self.non_overlap_train_dataloader = DataLoader(non_overlap_trainset, batch_size=self.batch_size, shuffle=False)
        else:
            # init train_dataloader (no label column on this side)
            overlap_x = torch.tensor(overlap_train_data.to_numpy(), dtype=torch.float32)
            overlap_trainset = TensorDataset(overlap_x)
            self.overlap_train_dataloader = DataLoader(overlap_trainset, batch_size=self.batch_size, shuffle=False)

    def _set_val_dataloader(self):
        """Build the validation loader (labels only on the label side)."""
        val_data = self._read_data(self.input_valset, is_train=False)
        # NOTE(review): this assigns self.val_dataloader although __init__
        # initializes self.eval_dataloader — confirm which name consumers use.
        if self.label:
            labels = torch.tensor(val_data.iloc[:, 0].to_numpy(), dtype=torch.float32).unsqueeze(dim=-1)
            valset = TensorDataset(labels)
        else:
            features = torch.tensor(val_data.to_numpy(), dtype=torch.float32)
            valset = TensorDataset(features)
        self.val_dataloader = DataLoader(valset, batch_size=self.batch_size, shuffle=False)
XFL | XFL-master/python/algorithm/framework/transfer/logistic_regression/label_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from common.communication.gRPC.python.channel import DualChannel
from common.utils.logger import logger
from service.fed_config import FedConfig
from .common import Common
from common.utils.model_io import ModelIO
from common.evaluation.metrics import CommonMetrics
class TransferLogisticRegressionLabelTrainer(Common):
    """Label-holder side of heterogeneous transfer logistic regression.

    Trains the local linear embedding jointly with the remote trainer by
    exchanging intermediate loss/gradient components over a dual channel.
    The exchange order here is send-then-recv; the trainer side mirrors it
    with recv-then-send — keep both in sync.
    """

    def __init__(self, train_conf: dict):
        super().__init__(train_conf, label=True)

    def cal_phi_and_ua(self, dataloader):
        """Forward the local model over *dataloader*.

        Returns:
            phi: [1, hidden_features], the sum over samples of y * u_a.
            ua: [num_samples, hidden_features], the stacked local embeddings.
        """
        phi = None  # [1, hidden_features] Φ_A
        ua = []
        for batch_idx, (x_batch, y_batch) in enumerate(dataloader):
            x_batch = x_batch.to(self.device)
            ua_batch = self.model(x_batch)  # [batch_size, hidden_features]
            ua.append(ua_batch)
            # Accumulate the y-weighted embedding sum batch by batch.
            phi_batch = torch.sum(y_batch * ua_batch, axis=0).unsqueeze(0)
            if phi is None:
                phi = phi_batch
            else:
                phi += phi_batch
        ua = torch.concat(ua, axis=0)
        return phi, ua

    def cal_parameters(self, dual_channel):
        """One local forward pass plus the component exchange with the trainer.

        Returns:
            (loss, ua, ua_grad): ua_grad is the manually-derived dL/d(ua)
            later fed to ``ua.backward``. Also refreshes ``self.phi`` for
            use in ``val_loop``.
        """
        overlap_phi, overlap_ua = self.cal_phi_and_ua(self.overlap_train_dataloader)
        non_overlap_phi, non_overlap_ua = self.cal_phi_and_ua(self.non_overlap_train_dataloader)
        phi = (overlap_phi + non_overlap_phi) / self.sample_num
        phi_2 = torch.matmul(phi.T, phi)  # (Φ_A)‘(Φ_A) [hidden_features, hidden_features]
        overlap_y = self.overlap_y  # {C(y)=y} [overlap_size, 1]
        overlap_y_2 = overlap_y * overlap_y  # {D(y)=y^2} [overlap_size, 1]
        # calculate 3 components that will be sent to the trainer
        overlap_y_2_phi_2 = 0.25 * overlap_y_2.unsqueeze(2) * phi_2  # [overlap_size, hidden_features, hidden_features]
        overlap_y_phi = -0.5 * overlap_y * phi  # [overlap_size, hidden_features]
        comp_ua = -self.constant_k * overlap_ua  # [overlap_size, 1]
        # exchange components (send first, then recv — the trainer mirrors this)
        dual_channel.send((overlap_y_2_phi_2, overlap_y_phi, comp_ua))
        ub, ub_2, comp_ub = dual_channel.recv()
        # compute gradients to execute backward
        overlap_y_2_phi = (overlap_y_2 * phi).unsqueeze(1)
        loss_grads_const_part1 = 0.25 * torch.matmul(overlap_y_2_phi, ub_2).squeeze(1)
        loss_grads_const_part2 = overlap_y * ub
        const = torch.sum(loss_grads_const_part1, axis=0) - 0.5 * torch.sum(loss_grads_const_part2, axis=0)
        non_overlap_y = self.non_overlap_y
        non_overlap_ua_grad = self.alpha * const * non_overlap_y / self.sample_num
        overlap_ua_grad = self.alpha * const * overlap_y / self.sample_num + comp_ub
        # compute loss (second-order approximation of the logistic loss)
        overlap_num = overlap_y.shape[0]
        overlap_loss = torch.sum(comp_ua * ub)
        ub_phi = torch.matmul(ub, phi.T)
        part1 = -0.5 * torch.sum(overlap_y * ub_phi)
        part2 = 1.0 / 8 * torch.sum(ub_phi * ub_phi)
        part3 = len(overlap_y) * np.log(2)  # constant term log(2) per overlap sample
        loss_y = part1 + part2 + part3
        loss = self.alpha * (loss_y / overlap_num) + overlap_loss / overlap_num
        ua = torch.concat((overlap_ua, non_overlap_ua), axis=0)
        ua_grad = torch.concat((overlap_ua_grad, non_overlap_ua_grad), axis=0)
        # update phi
        self.phi = phi
        return loss, ua, ua_grad

    def train_loop(self, optimizer, lr_scheduler, dual_channel):
        """Run ``self.local_epoch`` local updates and log the mean loss."""
        loss_sum = 0
        for lepoch in range(1, self.local_epoch + 1):
            loss, ua, ua_grad = self.cal_parameters(dual_channel)
            optimizer.zero_grad()
            # Manual backward: ua_grad carries dL/d(ua) computed above.
            ua.backward(ua_grad)
            optimizer.step()
            loss_sum += loss
        if lr_scheduler:
            lr_scheduler.step()
        loss_sum /= self.local_epoch
        logger.info(f"loss: {loss_sum}")

    def val_loop(self, dual_channel, global_epoch: int = 0):
        """Validate using the trainer's embeddings and the cached phi."""
        logger.info("val_loop start")
        self.model.eval()
        labels = []
        for batch_idx, [y_batch] in enumerate(self.val_dataloader):
            labels.append(y_batch.numpy())
        ub = dual_channel.recv()
        predict_score = torch.matmul(ub, self.phi.T)
        val_predicts = torch.sigmoid(predict_score)
        labels: np.ndarray = np.concatenate(labels, axis=0)
        # NOTE(review): predictions are binarized at 0.5 before the metric
        # call, so score-based metrics (AUC/KS) would be degenerate here —
        # confirm self.metrics only contains hard-label metrics.
        val_predicts = np.array(val_predicts > 0.5, dtype=np.int32)
        metrics_output = CommonMetrics._calc_metrics(
            metrics=self.metrics,
            labels=labels,
            val_predicts=val_predicts,
            lossfunc_name=None,
            loss=None,
            dataset_type="val",
        )
        CommonMetrics.save_metric_csv(
            metrics_output=metrics_output,
            output_config=self.common_config.output,
            global_epoch=global_epoch,
            local_epoch=None,
            dataset_type="val",
        )

    def fit(self):
        """Training entry point: train/validate per global epoch, then save
        the model with phi in its meta info."""
        logger.info("Transfer logistic regression training start")
        dual_channel = DualChannel(
            name="transfer_logistic_regression_channel",
            ids=FedConfig.get_trainer()+[FedConfig.node_id]
        )
        optimizer = list(self.optimizer.values())[0]
        lr_scheduler = list(self.lr_scheduler.values())[0] if self.lr_scheduler.values() else None
        for epoch in range(1, self.global_epoch + 1):
            self.model.train()
            logger.info(f"trainer's global epoch {epoch}/{self.global_epoch} start...")
            self.train_loop(optimizer, lr_scheduler, dual_channel)
            self.val_loop(dual_channel, global_epoch=epoch)
        # phi travels in meta_dict so warm starts can restore it.
        ModelIO.save_torch_model(
            state_dict=self.model.state_dict(),
            save_dir=self.save_dir,
            model_name=self.save_model_name,
            meta_dict={"phi": self.phi}
        )
| 6,285 | 38.534591 | 118 | py |
XFL | XFL-master/python/algorithm/framework/transfer/logistic_regression/common.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pandas as pd
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset
from pathlib import Path
from ..base import BaseTrainer
from common.utils.logger import logger
from common.utils.model_io import ModelIO
from common.checker.x_types import All
from common.utils.config_sync import ConfigSynchronizer
class TransferLogisticRegression(nn.Module):
    """A single linear projection used as each party's local model.

    Maps an ``input_dim``-dimensional feature vector to an ``output_dim``-
    dimensional embedding; sigmoid/loss handling happens in the caller.
    """

    def __init__(self, input_dim: int, output_dim: int, bias: bool = False):
        super().__init__()
        # One affine layer; bias is off by default.
        self.linear = nn.Linear(input_dim, output_dim, bias=bias)

    def forward(self, x):
        # y = x @ W^T (+ b)
        return self.linear(x)
class Common(BaseTrainer):
    """Shared logic for both parties of transfer logistic regression.

    Handles config synchronization/parsing, model construction (with an
    optional pretrained warm start) and the overlap / non-overlap
    dataloaders.
    """

    def __init__(self, train_conf: dict, label: bool = False):
        # Keep the model/train config sections identical across all parties.
        sync_rule = {
            "model_info": {
                "config": All()
            },
            "train_info": {
                "interaction_params": All(),
                "train_params": All()
            }
        }
        train_conf = ConfigSynchronizer(train_conf).sync(sync_rule)
        self.label = label
        super().__init__(train_conf)
        self._parse_config()

    def _parse_config(self) -> None:
        """Cache output paths, model hyper-parameters and train parameters."""
        self.save_dir = Path(self.common_config.output.get("path", ""))
        # interaction_params
        self.model_name = self.common_config.model_info.get("name")
        self.save_model_name = self.common_config.output.get("model", {}).get("name", {})
        self.pretrain_model_path = self.common_config.input.get("pretrained_model", {}).get("path")
        self.pretrain_model_name = self.common_config.input.get("pretrained_model", {}).get("name")
        self.model_conf = self.common_config.model_info.get("config", {})
        self.num_features = self.model_conf.get("num_features")
        self.hidden_features = self.model_conf.get("hidden_features")
        # k = 1/d, the scaling constant used in exchanged gradient terms.
        self.constant_k = 1 / self.hidden_features
        self.alpha = self.model_conf.get("alpha")
        self.bias = self.model_conf.get("bias", False)
        self.global_epoch = self.common_config.train_params.get("global_epoch")
        self.local_epoch = self.common_config.train_params.get("local_epoch")
        self.train_batch_size = self.common_config.train_params.get("train_batch_size")
        self.val_batch_size = self.common_config.train_params.get("val_batch_size")

    def _set_model(self) -> None:
        """Build the local linear embedding model, optionally warm-started.

        Returns:
            The initialized (and possibly pretrained) model on self.device.
        """
        logger.info("Init model start.")
        self.phi = None  # phi is persisted in the label trainer's model meta info
        model = TransferLogisticRegression(
            input_dim=self.num_features, output_dim=self.hidden_features, bias=self.bias
        )
        # Load pretrained model if needed.
        if self.pretrain_model_path is not None and self.pretrain_model_path != "":
            model_info = ModelIO.load_torch_model(
                os.path.join(self.pretrain_model_path, self.pretrain_model_name))
            state_dict = model_info["state_dict"]
            # BUGFIX: "phi" is saved via meta_dict and therefore lives in
            # model_info, not in the torch state_dict — the old check
            # ('"phi" in state_dict') could never restore it.
            if "phi" in model_info:
                self.phi = model_info["phi"]
            model.load_state_dict(state_dict)
        model = model.to(self.device)
        logger.info("Init model completed.")
        return model

    def _read_data(self, input_dataset, is_train=True):
        """Load a configured CSV dataset.

        Args:
            input_dataset: list of dataset configs; only the first is used.
            is_train: when True, also load overlap_index.npy from the same
                directory.

        Returns:
            None when the config list is empty; the dataframe for validation
            data; (dataframe, overlap_index) for training data.

        Raises:
            NotImplementedError: for dataset types other than "csv".
        """
        if len(input_dataset) == 0:
            return None
        conf = input_dataset[0]
        if conf["type"] != "csv":
            raise NotImplementedError(
                "Dataset load method {} does not Implemented.".format(conf["type"])
            )
        path = os.path.join(conf['path'], conf['name'])
        # First column is the sample id when has_id is set.
        index_col = 0 if conf['has_id'] else False
        train_data = pd.read_csv(path, index_col=index_col)
        if not is_train:
            return train_data
        overlap_index = np.load(os.path.join(conf['path'], "overlap_index.npy"))
        return train_data, overlap_index

    def _set_train_dataloader(self):
        """Build overlap (and, for the label holder, non-overlap) loaders."""
        self.overlap_y, self.non_overlap_y = None, None
        self.overlap_train_dataloader, self.non_overlap_train_dataloader = None, None
        train_data, overlap_index = self._read_data(self.common_config.input_trainset)
        self.sample_num = train_data.shape[0]
        overlap_train_data = train_data.loc[overlap_index]
        if self.label:
            # Vectorized complement of the overlap ids; the old per-row
            # np.append loop was O(n^2) and cast integer labels to float.
            non_overlap_train_data = train_data.loc[~train_data.index.isin(overlap_index)]
            if non_overlap_train_data.shape[0] == 0:
                raise ValueError("There is no non-overlap data in the trainset. If non_overlap_index is empty, there is no need to use transfer learning")
            # init overlap_y and non_overlap_y (labels are column 0)
            self.overlap_y = torch.tensor(
                overlap_train_data.iloc[:, 0].to_numpy(), dtype=torch.float32).unsqueeze(1)
            self.non_overlap_y = torch.tensor(
                non_overlap_train_data.iloc[:, 0].to_numpy(), dtype=torch.float32).unsqueeze(1)
            # init train_dataloader
            overlap_x = torch.tensor(
                overlap_train_data.iloc[:, 1:].to_numpy(), dtype=torch.float32)
            overlap_trainset = TensorDataset(overlap_x, self.overlap_y)
            self.overlap_train_dataloader = DataLoader(
                overlap_trainset, batch_size=self.train_batch_size, shuffle=False)
            non_overlap_x = torch.tensor(
                non_overlap_train_data.iloc[:, 1:].to_numpy(), dtype=torch.float32)
            non_overlap_trainset = TensorDataset(non_overlap_x, self.non_overlap_y)
            self.non_overlap_train_dataloader = DataLoader(
                non_overlap_trainset, batch_size=self.train_batch_size, shuffle=False)
        else:
            # Non-label party: the full overlap block is features.
            overlap_x = torch.tensor(overlap_train_data.to_numpy(), dtype=torch.float32)
            overlap_trainset = TensorDataset(overlap_x)
            self.overlap_train_dataloader = DataLoader(
                overlap_trainset, batch_size=self.train_batch_size, shuffle=False)

    def _set_val_dataloader(self):
        """Create the validation dataloader (labels only on the label side,
        features only on the trainer side)."""
        self.val_dataloader = None
        val_data = self._read_data(self.common_config.input_valset, is_train=False)
        if self.label:
            labels = torch.tensor(val_data.iloc[:, 0].to_numpy(), dtype=torch.float32).unsqueeze(dim=-1)
            valset = TensorDataset(labels)
            self.val_dataloader = DataLoader(valset, batch_size=self.val_batch_size, shuffle=False)
        else:
            features = torch.tensor(val_data.to_numpy(), dtype=torch.float32)
            valset = TensorDataset(features)
            self.val_dataloader = DataLoader(valset, batch_size=self.val_batch_size, shuffle=False)
| 7,724 | 42.644068 | 154 | py |
XFL | XFL-master/python/algorithm/framework/transfer/logistic_regression/trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from common.communication.gRPC.python.channel import DualChannel
from common.utils.logger import logger
from service.fed_config import FedConfig
from .common import Common
from common.utils.model_io import ModelIO
class TransferLogisticRegressionTrainer(Common):
    """Trainer (non-label) side of heterogeneous transfer logistic regression.

    Mirrors the label trainer's exchange order: it recv()s the label party's
    components first, then send()s its own — keep both sides in sync.
    """

    def __init__(self, train_conf: dict):
        super().__init__(train_conf, label=False)

    def cal_ub(self, dataloader):
        """Forward the local model over *dataloader* and return the stacked
        embeddings u_b, shape [num_samples, hidden_features]."""
        ub = []
        for batch_idx, [x_batch] in enumerate(dataloader):
            x_batch = x_batch.to(self.device)
            ub_batch = self.model(x_batch)  # [batch_size, hidden_features]
            ub.append(ub_batch)
        ub = torch.concat(ub, axis=0)
        return ub

    def cal_parameters(self, dual_channel):
        """One forward pass plus the component exchange with the label party.

        Returns:
            (ub, ub_grad): ub_grad is the manually-derived dL/d(ub) later
            fed to ``ub.backward``.
        """
        ub = self.cal_ub(self.overlap_train_dataloader)  # [overlap_size, hidden_features]
        # calculate 3 components that will be sent to the label trainer
        ub_ex = ub.unsqueeze(1)
        ub_2 = torch.matmul(ub.unsqueeze(2), ub_ex)  # [overlap_size, hidden_features, hidden_features]
        comp_ub = -self.constant_k * ub  # [overlap_size, hidden_features]
        # exchange components (recv first, then send — mirror of the label side)
        overlap_y_2_phi_2, overlap_y_phi, comp_ua = dual_channel.recv()
        dual_channel.send((ub, ub_2, comp_ub))
        # compute gradients to execute backward
        ub_overlap_y_2_phi_2 = torch.matmul(ub_ex, overlap_y_2_phi_2)
        l1_grad_b = ub_overlap_y_2_phi_2.squeeze(1) + overlap_y_phi
        ub_grad = self.alpha * l1_grad_b + comp_ua
        return ub, ub_grad

    def fit(self):
        """Training entry point: train/validate per global epoch, then
        persist the local model weights."""
        logger.info("Transfer logistic regression training start")
        dual_channel = DualChannel(
            name="transfer_logistic_regression_channel",
            ids=FedConfig.get_label_trainer()+[FedConfig.node_id]
        )
        optimizer = list(self.optimizer.values())[0]
        lr_scheduler = list(self.lr_scheduler.values())[0] if self.lr_scheduler.values() else None
        for epoch in range(1, self.global_epoch + 1):
            self.model.train()
            logger.info(f"trainer's global epoch {epoch}/{self.global_epoch} start...")
            self.train_loop(optimizer, lr_scheduler, dual_channel)
            self.val_loop(dual_channel)
        ModelIO.save_torch_model(
            state_dict=self.model.state_dict(),
            save_dir=self.save_dir,
            model_name=self.save_model_name
        )

    def train_loop(self, optimizer, lr_scheduler, dual_channel):
        """Run ``self.local_epoch`` manual-gradient updates."""
        for lepoch in range(1, self.local_epoch + 1):
            ub, ub_grad = self.cal_parameters(dual_channel)
            optimizer.zero_grad()
            # Manual backward: ub_grad carries dL/d(ub) from cal_parameters.
            ub.backward(ub_grad)
            optimizer.step()
        if lr_scheduler:
            lr_scheduler.step()

    def val_loop(self, dual_channel):
        """Send validation embeddings to the label party for evaluation."""
        self.model.eval()
        ub = self.cal_ub(self.val_dataloader)
        dual_channel.send(ub)
| 3,462 | 36.236559 | 102 | py |
XFL | XFL-master/python/algorithm/framework/vertical/__init__.py | 0 | 0 | 0 | py | |
XFL | XFL-master/python/algorithm/framework/vertical/vertical_model_base.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from pathlib import Path
import pandas as pd
import numpy as np
from common.evaluation.metrics import BiClsMetric, DecisionTable, RegressionMetric, ThresholdCutter, LiftGainCalculator
from common.utils.config_parser import TrainConfigParser
from common.utils.logger import logger
from sklearn.metrics import roc_curve
import json
from sklearn.metrics import precision_recall_curve
from algorithm.core.tree.feature_importance import FeatureImportance
from algorithm.core.tree.feature_importance import FeatureImportance
class VerticalModelBase(TrainConfigParser):
    """Base class for vertical federated models.

    Provides metric calculation plus CSV/JSON writers for predictions and
    plot data (loss curve, ROC, KS, lift/gain, PR, feature importance).
    """

    def __init__(self, train_conf: dict, label: bool = False):
        super().__init__(train_conf)
        self._parse_config()
        # Raw config kept around: the plot writers look output file names up in it.
        self.train_conf = train_conf
        # True when this party holds the label column.
        self.label = label
        # Optional caches a subclass may fill so _write_ks_data can reuse
        # already-computed KS curves instead of recomputing them.
        self.train_ks_metrics = None
        self.val_ks_metrics = None
def _parse_config(self) -> None:
# output_path
# self.save_dir = Path(self.output.get("model", {}).get("path", ""))
self.save_dir = Path(self.output.get("path", ""))
self.metric_dir = self.save_dir
# params
self.lossfunc_conifg = self.train_params.get("lossfunc", {})
self.metric_config = self.train_params.get("metric", {})
# interaction_params
self.echo_training_metrics = self.interaction_params.get(
"echo_training_metrics", False)
self.write_training_prediction = self.interaction_params.get(
"write_training_prediction", False)
self.write_validation_prediction = self.interaction_params.get(
"write_validation_prediction", False)
# if self.output.get("metrics"):
# self.metric_path = Path(self.output["metrics"].get("path"))
# else:
# self.metric_path = self.save_dir
# # params
# self.lossfunc_conifg = self.train_params.get("lossfunc_config")
# self.metric_config = self.train_params.get("metric_config")
# # interaction_params
# self.echo_training_metrics = self.interaction_params.get("echo_training_metrics")
# self.write_training_prediction = self.interaction_params.get("write_training_prediction")
# self.write_validation_prediction = self.interaction_params.get("write_validation_prediction")
def _calc_metrics(self, y, p, epoch, stage="train", loss={}):
if stage == "train" and not self.echo_training_metrics:
return
if not os.path.exists(self.metric_dir):
os.makedirs(self.metric_dir)
# output_file = os.path.join(
# self.metric_path, "{}_metrics.csv".format(stage))
output_file = os.path.join(
self.metric_dir, self.output.get("metric_" + stage)["name"])
if self.model_info["name"] not in ["vertical_linear_regression", "vertical_poisson_regression"]:
if loss:
evaluate = BiClsMetric(epoch, output_file, self.metric_config)
else:
evaluate = BiClsMetric(
epoch, output_file, self.metric_config, self.lossfunc_conifg)
else:
evaluate = RegressionMetric(epoch, output_file, self.metric_config)
evaluate.calc_metrics(y, p)
for key, value in loss.items():
evaluate.metrics[key] = value
if self.model_info["name"] not in ["vertical_linear_regression", "vertical_poisson_regression"]:
evaluate.save()
else:
evaluate.save(evaluate.metrics)
if "decision_table" in self.metric_config:
dt = DecisionTable(self.metric_config["decision_table"])
dt.fit(y, p)
dt.save(os.path.join(self.metric_dir, self.output.get(
"decision_table_" + stage)["name"]))
# dt.save(os.path.join(self.metric_path,
# "{}_decision_table.csv".format(stage)))
logger.info("{} {}".format(stage, evaluate))
return evaluate.metrics
def _write_loss(self, train_loss, val_loss, epoch):
# prepare write path
try:
file_path = Path(
self.metric_dir,
self.train_conf['output']['plot_loss']['name']
)
except:
file_path = Path(
self.metric_dir,
f"{self.model_info['name']}_plot_loss.json"
)
if file_path.is_file():
with open(file_path, "r") as file:
prev_loss_list = json.load(file)
else:
prev_loss_list = []
prev_loss_list.append(
{'epoch': epoch, 'loss': train_loss, 'period': 'train'})
prev_loss_list.append(
{'epoch': epoch, 'loss': val_loss, 'period': 'val'})
with open(file_path, "w") as out_fp:
json.dump(prev_loss_list, out_fp)
def _write_prediction(self, y, p, idx=None, epoch=None, final=False, stage="train"):
if stage == "train" and not self.write_training_prediction:
return
elif stage == "val" and not self.write_validation_prediction:
return
elif stage not in ("train", "val"):
raise ValueError("stage must be 'train' or 'val'.")
if not os.path.exists(self.save_dir):
os.makedirs(self.save_dir)
if final:
# file_name = os.path.join(self.save_dir, "predicted_probabilities_{}.csv".format(stage))
file_name = os.path.join(
self.save_dir, self.output.get("prediction_" + stage)["name"])
else:
# file_name = os.path.join(self.save_dir, "predicted_probabilities_{}.epoch_{}".format(stage, epoch))
file_name = self.output.get("prediction_" + stage)["name"]
file_name_list = file_name.split(".")
# file_name_list.insert(-1, '_epoch_'+str(epoch))
file_name_list[-2] += '_epoch_' + str(epoch)
file_name = '.'.join(file_name_list)
file_name = os.path.join(self.save_dir, file_name)
if idx is None:
df = pd.DataFrame({"pred": p, "label": y})
df = df.reset_index().rename(columns={"index": "id"})
df.to_csv(file_name, header=True, index=False, float_format='%.6g')
else:
df = pd.DataFrame({'id': idx, "pred": p, "label": y})
df.to_csv(file_name, header=True, index=False, float_format='%.6g')
    def _write_plot_data(self):
        # Intentional no-op: hook for subclasses to emit additional plot data.
        pass
@staticmethod
def __prune_data_size(data: np.array, n_cuts: int):
size = data.size
cuts = np.linspace(0, 1, n_cuts + 1)
index_list = [int(size * cut) for cut in cuts]
if index_list[-1] >= size:
index_list = index_list[:-1]
return data[index_list]
def _write_roc_data(self, train_label, train_pred, val_label=None, val_pred=None, n_cuts=1000) -> None:
# prepare write path
try:
file_path = Path(
self.metric_dir,
self.train_conf['output']['plot_roc']['name']
)
except:
file_path = Path(
self.metric_dir,
f"{self.model_info['name']}_plot_roc.json"
)
try:
train_fpr, train_tpr, _ = roc_curve(train_label, train_pred)
except:
logger.error("Could not calculate train roc curve")
return
# pruning
size = train_fpr.size
if size > n_cuts:
logger.info(
f"Too much points in training roc curve. Cutting to {n_cuts} points")
train_fpr = self.__prune_data_size(train_fpr, n_cuts)
train_tpr = self.__prune_data_size(train_tpr, n_cuts)
# add to roc_list
roc_list = []
for fpr, tpr in zip(train_fpr, train_tpr):
roc_list.append(
{"fpr": round(fpr, 6), "tpr": round(tpr, 6), "period": "train"})
# If val data exists
if not val_label is None:
try:
val_fpr, val_tpr, _ = roc_curve(val_label, val_pred)
except:
logger.error("Could not calculate val roc curve")
with open(file_path, "w") as file:
json.dump(roc_list, file)
return
# pruning
if val_fpr.size > n_cuts:
logger.info(
f"Too much points in validation roc curve. Cutting to {n_cuts} points")
val_fpr = self.__prune_data_size(val_fpr, n_cuts)
val_tpr = self.__prune_data_size(val_tpr, n_cuts)
for fpr, tpr in zip(val_fpr, val_tpr):
roc_list.append(
{"fpr": round(fpr, 6), "tpr": round(tpr, 6), "period": "val"})
# Sort
logger.info("Sorting roc list")
roc_list = sorted(roc_list, key=lambda el: el['fpr'])
logger.info("Writing roc to file")
with open(file_path, "w") as file:
json.dump(roc_list, file)
return
def _write_ks_data(self, train_label, train_pred, val_label=None, val_pred=None, n_cuts=1000) -> None:
# Setup file path
try:
file_path = Path(
self.metric_dir,
self.train_conf['output']['plot_ks']['name']
)
except:
file_path = Path(
self.metric_dir,
f"{self.model_info['name']}_plot_ks.json"
)
ks_list = []
tc = ThresholdCutter()
# Train
if self.train_ks_metrics is None:
tc.sim_cut_by_value(train_label, train_pred)
train_ks_df = pd.DataFrame(tc.metrics)
train_bst_threshold = tc.bst_threshold
train_bst_score = tc.bst_score
else:
logger.info(f"Using calculated train ks")
train_ks_df = pd.DataFrame(self.train_ks_metrics)
train_bst_threshold = self.train_ks_bst_threshold
train_bst_score = self.train_ks_bst_score
# pruning
if train_ks_df.shape[0] > n_cuts:
logger.info(
f"Pruning training ks data. Before pruning: {train_ks_df.shape[0]}")
prune_train_ks_df = pd.DataFrame()
prune_train_ks_df['threshold'] = self.__prune_data_size(
train_ks_df['threshold'].values, n_cuts)
prune_train_ks_df['tpr'] = self.__prune_data_size(
train_ks_df['tpr'].values, n_cuts)
prune_train_ks_df['fpr'] = self.__prune_data_size(
train_ks_df['fpr'].values, n_cuts)
train_ks_df = prune_train_ks_df
logger.info(
f"After pruning, training ks data: {train_ks_df.shape[0]}")
for _, row in train_ks_df.iterrows():
ks_list.append(
{"thres": round(row['threshold'], 6), "value": round(row['tpr'], 6), "type_period": "tpr_train"})
ks_list.append(
{"thres": round(row['threshold'], 6), "value": round(row['fpr'], 6), "type_period": "fpr_train"})
ks_list.append(
{"thres": round(row['threshold'], 6), "value": round(row['ks'], 6), "type_period": "ks_train"})
# Val
if not val_label is None:
if self.val_ks_metrics is None:
tc.sim_cut_by_value(val_label, val_pred)
val_ks_df = pd.DataFrame(tc.metrics)
val_bst_threshold = tc.bst_threshold
val_bst_score = tc.bst_score
else:
logger.info(f"Using calculated val ks")
val_ks_df = pd.DataFrame(self.val_ks_metrics)
val_bst_threshold = self.val_ks_bst_threshold
val_bst_score = self.val_ks_bst_score
# pruning
if val_ks_df.shape[0] > n_cuts:
logger.info(
f"Pruning val ks data. Before pruning: {val_ks_df.shape[0]}")
prune_val_ks_df = pd.DataFrame()
prune_val_ks_df['threshold'] = self.__prune_data_size(
val_ks_df['threshold'].values, n_cuts)
prune_val_ks_df['tpr'] = self.__prune_data_size(
val_ks_df['tpr'].values, n_cuts)
prune_val_ks_df['fpr'] = self.__prune_data_size(
val_ks_df['fpr'].values, n_cuts)
val_ks_df = prune_val_ks_df
logger.info(
f"After pruning, val ks data: {val_ks_df.shape[0]}")
for _, row in val_ks_df.iterrows():
ks_list.append(
{"thres": round(row['threshold'], 6), "value": round(row['tpr'], 6), "type_period": "tpr_val"})
ks_list.append(
{"thres": round(row['threshold'], 6), "value": round(row['fpr'], 6), "type_period": "fpr_val"})
ks_list.append(
{"thres": round(row['threshold'], 6), "value": round(row['ks'], 6), "type_period": "ks_val"})
# Sort lists
logger.info("Sorting ks list")
ks_list = sorted(ks_list, key=lambda el: el["thres"])
logger.info("Writing ks to file")
with open(file_path, "w") as file:
json.dump(ks_list, file)
return
def _write_lift_gain_data(self, train_label, train_pred, val_label=None, val_pred=None) -> None:
# Setup file path
try:
lift_file_path = Path(
self.metric_dir, self.train_conf['output']['plot_lift']['name']
)
except:
lift_file_path = Path(
self.metric_dir, f"{self.model_info['name']}_plot_lift.json"
)
try:
gain_file_path = Path(
self.metric_dir, self.train_conf['output']['plot_gain']['name']
)
except:
gain_file_path = Path(
self.metric_dir, f"{self.model_info['name']}_plot_gain.json"
)
lift_gain_cal = LiftGainCalculator()
lift_gain_cal.cal_lift_gain(train_label, train_pred)
train_lift_gain_df = lift_gain_cal.metrics
train_lift_gain_df = train_lift_gain_df.query("lift.notnull()")
# cut_list = []
gain_list = []
lift_list = []
for _, row in train_lift_gain_df.iterrows():
gain_list.append(
{
"bin_val": round(row['percentage_data'], 6),
"gain": round(row['cum_gain'], 6),
"period": "train"
}
)
lift_list.append(
{
"bin_val": round(row['percentage_data'], 6),
"lift": round(row['lift'], 6),
"period": "train"
}
)
logger.info(f"Training lift point number: {len(lift_list)}")
if not val_label is None:
lift_gain_cal.cal_lift_gain(val_label, val_pred)
val_lift_gain_df = lift_gain_cal.metrics
val_lift_gain_df = val_lift_gain_df.query("lift.notnull()")
for _, row in val_lift_gain_df.iterrows():
gain_list.append(
{
"bin_val": round(row['percentage_data'], 6),
"gain": round(row['cum_gain'], 6),
"period": "val"
}
)
lift_list.append(
{
"bin_val": round(row['percentage_data'], 6),
"lift": round(row['lift'], 6),
"period": "val"
}
)
logger.info(f"Val lift point number: {len(lift_list)}")
# Sort values by horizontal axis
logger.info("Sorting gain and list lists")
gain_list = sorted(gain_list, key=lambda el: el["bin_val"])
lift_list = sorted(lift_list, key=lambda el: el["bin_val"])
with open(lift_file_path, "w") as lift_file:
json.dump(lift_list, lift_file)
with open(gain_file_path, "w") as gain_file:
json.dump(gain_list, gain_file)
return
def _write_pr_data(self, train_label, train_pred, val_label=None, val_pred=None, n_cuts=1000) -> None:
# Set up file path
try:
pr_curve_path = Path(
self.metric_dir,
self.train_conf['output']['plot_precision_recall']['name']
)
except:
pr_curve_path = Path(
self.metric_dir,
f"{self.model_info['name']}_plot_pr_curve.json"
)
pr_list = []
# Add train pr data
precision, recall, thres = precision_recall_curve(
train_label, train_pred)
# pruning
if precision.size > n_cuts:
logger.info(f"Too much points in training pr curve, pruning")
precision = self.__prune_data_size(precision, n_cuts)
recall = self.__prune_data_size(recall, n_cuts)
for pr, rc in zip(precision, recall):
pr_list.append(
{
"recall": round(rc, 6),
"precision": round(pr, 6),
"period": "train"
}
)
# Add val pr data
if not val_label is None:
precision, recall, thres = precision_recall_curve(
val_label, val_pred)
# pruning
if precision.size > n_cuts:
logger.info("Too much points in validation pr curve, pruning")
precision = self.__prune_data_size(precision, n_cuts)
recall = self.__prune_data_size(recall, n_cuts)
for pr, rc in zip(precision, recall):
pr_list.append(
{
"recall": round(rc, 6),
"precision": round(pr, 6),
"period": "val"
}
)
# Sort
logger.info("Sorting pr list")
pr_list = sorted(pr_list, key=lambda el: el["recall"])
# Write file
with open(pr_curve_path, "w") as pr_file:
json.dump(pr_list, pr_file)
return
def _write_feature_importance(self):
# Get importance file path
try:
feature_importance_file_path = Path(
self.metric_dir,
self.train_conf['output']['plot_feature_importance']['name']
)
except:
feature_importance_file_path = Path(
self.metric_dir,
f"{self.model_info['name']}_plot_feature_importance.json"
)
logger.info(f"Feature importances: {self.feature_importances_}")
feature_importance_list = []
# Get and normalize feature importance
try:
normalizer = np.sum([_.get()
for _ in self.feature_importances_.values()])
except:
normalizer = np.sum(
[abs(_) for _ in self.feature_importances_.values()])
for k, v in sorted(self.feature_importances_.items(), key=lambda d: d[1], reverse=True):
feature_name = "_".join(map(str, k))
if isinstance(v, FeatureImportance):
feature_importance = v.get() / normalizer
else:
feature_importance = v / normalizer
feature_importance_list.append(
{
"feature": feature_name,
"importance": round(feature_importance, 6)
}
)
feature_importance_list = sorted(
feature_importance_list, key=lambda d: abs(d['importance']), reverse=True
)
# Write file
with open(feature_importance_file_path, "w") as feature_importance_file:
json.dump(feature_importance_list, feature_importance_file)
| 20,479 | 38.384615 | 119 | py |
XFL | XFL-master/python/algorithm/framework/vertical/pearson/base.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import string
from pathlib import Path
from typing import List
import pandas as pd
from algorithm.core.encryption_param import PlainParam, get_encryption_param
from common.crypto.paillier.utils import get_core_num
from common.utils.config_parser import TrainConfigParser
from common.utils.logger import logger
class VerticalPearsonBase(TrainConfigParser):
    """Base class for vertical federated Pearson correlation.

    Parses the column/encryption configuration and loads the training data
    into feature/label members shared by the concrete trainers.
    """

    def __init__(self, train_conf: dict, label: bool = False):
        super().__init__(train_conf)
        # Whether this party holds the label; may be flipped to True by
        # _init_data when the dataset config declares a label column.
        self.label = label
        # Defaults, refined by _init_config below.
        self.max_num_cores = 1
        self.sample_size = None
        self.encryption = "plain"
        self.encryption_param = PlainParam()
        self._init_config()
        self._init_data()
        # Containers for summary statistics (populated later by subclasses).
        self._local_summary = {}
        self._summary = {}
        # Fixed constant; its consumer is not visible in this file chunk —
        # presumably the number of embedding/anchor columns. TODO confirm.
        self.num_embed = 10
def _init_data(self):
logger.info("init data loader.")
if not self.input_trainset:
return None
input_info = self.input_trainset[0]
file_path = str(Path(input_info.get("path"), input_info.get("name")))
type_ = input_info.get("type", "None")
if input_info.get("has_id", True):
self.index_col = input_info.get("index_col", 'id')
else:
self.index_col = None
if input_info.get("has_label", False):
label_name = input_info.get("label_name", 'y')
self.label = True
else:
label_name = None
if type_ == "csv":
if self.computing_engine == "local":
df = pd.read_csv(file_path, index_col=self.index_col)
else:
raise NotImplementedError("Computing engine {} is not supported.".format(self.computing_engine))
else:
raise NotImplementedError("Dataset type {} is not supported.".format(type_))
if self.label:
feature_cols = [_ for _ in df.columns if _ != label_name]
self.train_features = df[feature_cols]
if label_name:
self.train_label = df[label_name]
else:
self.train_label = None
else:
self.train_features = df
self.train_ids = df.index
def _init_config(self):
params = self.train_info.get("train_params")
self.col_index = params.get("col_index", -1)
self.col_names = params.get("col_names", '')
encryption_params = params.get("encryption", {"plain": {}})
self.encryption = list(encryption_params.keys())[0]
encryption_param = encryption_params[self.encryption]
self.encryption_param = get_encryption_param(self.encryption, encryption_param)
self.sample_size = params.get("sample_size", None)
if self.encryption == "paillier" and self.encryption_param.parallelize_on:
self.max_num_cores = get_core_num(params.get("max_num_cores", 999))
else:
self.max_num_cores = 1
def _select_columns(self):
if self.col_index == -1:
return self.train_features
elif isinstance(self.col_index, list):
feature_start_index = 0
if self.index_col is not None:
feature_start_index += 1
if self.label:
feature_start_index += 1
feature_names = self.train_features.columns.to_list()
select_feature_cols = [feature_names[_ - feature_start_index] for _ in self.col_index]
if self.col_names:
for f in self.col_names.split(','):
if f not in select_feature_cols:
select_feature_cols.append(f)
select_feature_cols.sort(key=lambda d: feature_names.index(d))
return self.train_features[select_feature_cols]
else:
raise ValueError("col_index must be -1 or a list of int.")
@staticmethod
def standardize(x):
mu = x.mean()
sigma = x.std()
if sigma > 0:
return (x - mu) / sigma
else:
return x - mu
@staticmethod
def string_encryption(str_list: List[str]):
ret = {}
for s in str_list:
ret[s] = ''.join(random.sample(string.ascii_letters + string.digits, 16))
return ret
| 4,149 | 31.677165 | 100 | py |
XFL | XFL-master/python/algorithm/framework/vertical/pearson/label_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pickle
from pathlib import Path
import numpy as np
from common.checker.matcher import get_matched_config
from common.checker.x_types import All
from service.fed_config import FedConfig
from service.fed_node import FedNode
from service.fed_control import ProgressCalculator
from .base import VerticalPearsonBase
from common.communication.gRPC.python.channel import BroadcastChannel, DualChannel
from common.utils.logger import logger
from algorithm.core.encryption_param import PlainParam, PaillierParam
from common.crypto.paillier.paillier import Paillier
from algorithm.core.paillier_acceleration import embed, umbed
class VerticalPearsonLabelTrainer(VerticalPearsonBase):
    """Label-trainer side of vertical Pearson correlation.

    Computes the local correlation block, streams (optionally
    Paillier-encrypted) feature columns to each trainer to obtain the
    cross-party blocks, collects trainer-to-trainer blocks, then merges
    and saves the full correlation matrix.
    """

    def __init__(self, train_conf: dict):
        """Broadcast the config, build the channels and, for Paillier,
        create and publish the public encryption context.

        Args:
            train_conf: training configuration dict; its train_info is
                broadcast to the trainers before local parsing.
        """
        self.sync_channel = BroadcastChannel(name="sync")
        # Push the config to trainers before the base class parses it.
        self._sync_config(train_conf)
        super().__init__(train_conf, label=False)
        self.feature_mapping = dict()
        self.channels = dict()
        self.channels["encryption_context"] = BroadcastChannel(name="encryption_context")
        self.channels["sample_idx_com"] = BroadcastChannel(name="sample_idx_com")
        self.channels["trainer_corr_com"] = dict()
        self.channels["trainer_feature_com"] = dict()
        self.node_id = FedNode.node_id
        # One dual channel per trainer for streaming feature columns.
        for party_id in FedConfig.get_trainer():
            self.channels["trainer_feature_com"][party_id] = DualChannel(
                name="trainer_feature_com_" + party_id, ids=[self.node_id, party_id]
            )
        # And one per trainer for collecting their correlation summaries.
        for party_id in FedConfig.get_trainer():
            self.channels["trainer_corr_com"][party_id] = DualChannel(
                name="trainer_corr_com_" + party_id, ids=[self.node_id, party_id]
            )
        if isinstance(self.encryption_param, (PlainParam, type(None))):
            self.private_context = None
        elif isinstance(self.encryption_param, PaillierParam):
            # Only the label trainer holds the private key; trainers receive
            # the serialized public context by broadcast.
            self.private_context = Paillier.context(
                self.encryption_param.key_bit_size,
                self.encryption_param.djn_on
            )
            self.public_context = self.private_context.to_public()
            self.channels["encryption_context"].broadcast(self.public_context.serialize(), use_pickle=False)
        else:
            raise TypeError(f"Encryption param type {type(self.encryption_param)} not valid.")

    def _sync_config(self, config):
        """Broadcast the config sections matched by the sync rule to trainers."""
        sync_rule = {
            "train_info": All()
        }
        config_to_sync = get_matched_config(config, sync_rule)
        self.sync_channel.broadcast(config_to_sync)

    def fit(self):
        """Run the full correlation protocol, then merge and save the result."""
        logger.info("vertical pearson label trainer start.")
        data = self._select_columns()
        # Optional row subsampling; broadcast the indices so all parties
        # use the same row sample.
        if self.sample_size is not None and self.sample_size < len(self.train_ids):
            logger.info("sampled %d data." % self.sample_size)
            sample_ids = np.random.choice(np.arange(len(self.train_ids)), self.sample_size)
            self.channels["sample_idx_com"].broadcast(sample_ids)
            data = data.iloc[sample_ids]
        data = data.apply(self.standardize)
        n = len(data)
        feature_names = data.columns.to_list()
        # Random aliases mask the real feature names from other parties.
        self.feature_mapping = self.string_encryption(feature_names)
        # On standardized columns, X^T X / n is the Pearson correlation.
        local_corr = np.dot(data.T, data)
        local_corr /= n
        self._summary["corr"] = dict()
        self._summary["corr"][(self.node_id, self.node_id)] = local_corr
        self._summary["features"] = {
            self.node_id: feature_names
        }
        self._summary["num_features"] = {
            self.node_id: len(feature_names)
        }
        remote_corr = dict()
        # Flag protocol: True tells the trainer there is nothing to receive.
        for party_id in FedConfig.get_trainer():
            remote_corr[party_id] = []
            if len(feature_names):
                self.channels["trainer_feature_com"][party_id].send(False)
            else:
                self.channels["trainer_feature_com"][party_id].send(True)
        if isinstance(self.encryption_param, (PlainParam, type(None))):
            # Plain mode: stream one column at a time; the second tuple
            # element marks the last column.
            for idx, f in enumerate(feature_names):
                for party_id in FedConfig.get_trainer():
                    if f != feature_names[-1]:
                        self.channels["trainer_feature_com"][party_id].send(
                            (data[f].to_numpy(), False))
                    else:
                        self.channels["trainer_feature_com"][party_id].send(
                            (data[f].to_numpy(), True))
                logger.info("label trainer encrypted %d features." % (idx + 1))
                # Each trainer replies with its local_mat^T · column product.
                for party_id in FedConfig.get_trainer():
                    corr_mat = self.channels["trainer_feature_com"][party_id].recv()
                    remote_corr[party_id].append(corr_mat)
            for party_id in FedConfig.get_trainer():
                remote_corr[party_id] = np.array(remote_corr[party_id]) / n
        if isinstance(self.encryption_param, PaillierParam):
            # Paillier mode: pack num_embed columns per plaintext before
            # encrypting to reduce the number of ciphertexts.
            for i in range(0, len(feature_names), self.num_embed):
                embedded_data = embed(data.iloc[:, i:(i + self.num_embed)].to_numpy().T)
                encrypted_data = Paillier.encrypt(
                    context=self.private_context,
                    data=embedded_data,
                    obfuscation=True,
                    num_cores=self.max_num_cores
                )
                for party_id in FedConfig.get_trainer():
                    if i + self.num_embed >= len(feature_names):
                        self.channels["trainer_feature_com"][party_id].send(
                            (encrypted_data, True))
                        logger.info("label trainer encrypted %d features." % len(feature_names))
                    else:
                        self.channels["trainer_feature_com"][party_id].send(
                            (encrypted_data, False))
                        logger.info("label trainer encrypted %d features." % (i + self.num_embed))
                for party_id in FedConfig.get_trainer():
                    emb_enc_corr_mat = self.channels["trainer_feature_com"][party_id].recv()
                    emb_corr_mat = Paillier.decrypt(
                        self.private_context, emb_enc_corr_mat, num_cores=self.max_num_cores, out_origin=True
                    )
                    result = []
                    # The last batch may hold fewer than num_embed features.
                    if i + self.num_embed >= len(feature_names):
                        umbed_num = len(feature_names) - i
                    else:
                        umbed_num = self.num_embed
                    for r in umbed(emb_corr_mat, umbed_num):
                        result.append(r)
                    # Undo the fixed-point scaling applied by the trainer.
                    remote_corr[party_id].append(np.array(result) / (10 ** self.encryption_param.precision))
            for party_id in FedConfig.get_trainer():
                if remote_corr[party_id]:
                    remote_corr[party_id] = np.concatenate(remote_corr[party_id]) / n
        logger.info("label trainer encrypted all features.")
        # Collect each trainer's (masked) feature names and store the
        # label-trainer-vs-trainer correlation blocks.
        for party_id in FedConfig.get_trainer():
            remote_features = self.channels["trainer_feature_com"][party_id].recv()
            self._summary["features"][party_id] = remote_features
            self._summary["num_features"][party_id] = len(remote_features)
            self._summary["corr"][(self.node_id, party_id)] = remote_corr[party_id]
        logger.info("label trainer get remote correlation matrices.")
        logger.info("get correlation matrix between trainers.")
        # Trainer-to-trainer blocks: decrypt (paillier) or rescale (plain).
        for party_id in FedConfig.get_trainer():
            other_summary = self.channels["trainer_corr_com"][party_id].recv()
            for k, v in other_summary.items():
                if k[0] != k[1]:
                    if self.encryption == "plain":
                        self._summary["corr"][k] = v[0].T / n
                    elif self.encryption == "paillier":
                        # NOTE: rebinds the name remote_corr as a list; the
                        # dict used above is no longer needed here.
                        remote_corr = []
                        for emb_enc_corr_mat, pack_num in zip(v[0], v[1]):
                            emb_corr_mat = Paillier.decrypt(
                                self.private_context, emb_enc_corr_mat, num_cores=self.max_num_cores, out_origin=True
                            )
                            corr_mat = []
                            for r in umbed(emb_corr_mat, pack_num):
                                corr_mat.append(r)
                            corr_mat = np.array(corr_mat) / (10 ** self.encryption_param.precision)
                            remote_corr.append(corr_mat)
                        if remote_corr:
                            self._summary["corr"][k] = np.concatenate(remote_corr).T / n
                        else:
                            self._summary["corr"][k] = []
                else:
                    # Diagonal (self vs self) blocks are stored as received.
                    self._summary["corr"][k] = v
        # update the progress of 100 to show the training is finished
        ProgressCalculator.finish_progress()
        self.save()

    def save(self):
        """Pickle the merged correlation summary to the configured output path."""
        save_dir = str(Path(self.output.get("path")))
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        model_name = self.output.get("corr")["name"]
        model_path = Path(save_dir, model_name)
        summary = self.merge_summary()
        with open(model_path, 'wb') as f:
            pickle.dump(summary, f)
        logger.info("model saved as: {}.".format(model_path))

    def merge_summary(self):
        """Assemble the per-party blocks into one full correlation matrix.

        Returns:
            dict with the stacked "corr" matrix, the flat feature list,
            each feature's source party, and per-party feature counts.
        """
        parties = FedConfig.get_label_trainer() + FedConfig.get_trainer()
        cor_mat = []
        features = []
        sources = []
        num_feature = 0
        for i in range(len(parties)):
            row = []
            for j in range(len(parties)):
                if (parties[i], parties[j]) in self._summary["corr"]:
                    corr_mat = self._summary["corr"][(parties[i], parties[j])]
                    if min(np.array(corr_mat).shape) > 0:
                        row.append(corr_mat)
                    else:
                        # Empty block: substitute a zero matrix of the right shape.
                        row.append(np.zeros(
                            (self._summary["num_features"][parties[i]], self._summary["num_features"][parties[j]])))
                else:
                    # Only one orientation is stored; transpose the mirror block.
                    corr_mat = self._summary["corr"][(parties[j], parties[i])]
                    if min(np.array(corr_mat).shape) > 0:
                        row.append(corr_mat.T)
                    else:
                        row.append(np.zeros(
                            (self._summary["num_features"][parties[i]], self._summary["num_features"][parties[j]])))
            cor_mat.append(np.concatenate(row, axis=1))
            features.extend(self._summary["features"][parties[i]])
            sources.extend([parties[i]] * self._summary["num_features"][parties[i]])
            num_feature += self._summary["num_features"][parties[i]]
        cor_mat = np.concatenate(cor_mat, axis=0)
        ret = {
            "corr": cor_mat,
            "features": features,
            "feature_source": sources,
            "num_features": self._summary["num_features"]
        }
        return ret
| 9,458 | 37.45122 | 99 | py |
XFL | XFL-master/python/algorithm/framework/vertical/pearson/__init__.py | 0 | 0 | 0 | py | |
XFL | XFL-master/python/algorithm/framework/vertical/pearson/trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from pathlib import Path
import pickle
import numpy as np
from common.utils.utils import update_dict
from service.fed_config import FedConfig
from service.fed_node import FedNode
from .base import VerticalPearsonBase
from algorithm.core.encryption_param import PlainParam, PaillierParam
from common.crypto.paillier.paillier import Paillier
from common.communication.gRPC.python.channel import BroadcastChannel, DualChannel
from common.utils.logger import logger
from algorithm.core.paillier_acceleration import embed
class VerticalPearsonTrainer(VerticalPearsonBase):
    """Trainer (non-label) side of vertical Pearson correlation.

    Receives feature columns from the label trainer and from earlier
    trainers, answers with local cross products, and forwards its own
    (optionally Paillier-encrypted) columns to later trainers.
    """

    def __init__(self, train_conf: dict):
        """Receive the synced config, build channels and load the public key.

        Args:
            train_conf: local training configuration; updated in place with
                the config broadcast by the label trainer before parsing.
        """
        self.sync_channel = BroadcastChannel(name="sync")
        conf = self._sync_config()
        update_dict(train_conf, conf)
        super().__init__(train_conf, label=False)
        self.channels = {}
        self.node_id = FedNode.node_id
        self.channels = dict()
        self.channels["encryption_context"] = BroadcastChannel(name="encryption_context")
        self.channels["sample_idx_com"] = BroadcastChannel(name="sample_idx_com")
        self.channels["trainer_feature_com"] = DualChannel(
            name="trainer_feature_com_" + self.node_id,
            ids=[self.node_id] + FedConfig.get_label_trainer()
        )
        self.channels["trainer_corr_com"] = DualChannel(
            name="trainer_corr_com_" + self.node_id,
            ids=[self.node_id] + FedConfig.get_label_trainer()
        )
        self.channels["trainer_com"] = dict()
        self.trainers = FedConfig.get_trainer()
        # Pairwise trainer-to-trainer channels; the earlier-listed trainer's
        # id comes first in the name so both ends derive the same name.
        for trainer_id in self.trainers:
            if trainer_id != self.node_id:
                if self.node_id not in self.trainers or trainer_id not in self.trainers:
                    continue
                elif self.trainers.index(trainer_id) < self.trainers.index(self.node_id):
                    self.channels["trainer_com"][trainer_id] = DualChannel(
                        name="trainer_com_" + trainer_id + "_" + self.node_id,
                        ids=[self.node_id, trainer_id]
                    )
                else:
                    self.channels["trainer_com"][trainer_id] = DualChannel(
                        name="trainer_com_" + self.node_id + "_" + trainer_id,
                        ids=[self.node_id, trainer_id]
                    )
        if isinstance(self.encryption_param, (PlainParam, type(None))):
            self.public_context = None
        elif isinstance(self.encryption_param, PaillierParam):
            # Deserialize the public key broadcast by the label trainer.
            self.public_context = self.channels["encryption_context"].recv(use_pickle=False)
            self.public_context = Paillier.context_from(self.public_context)
        else:
            raise TypeError(f"Encryption param type {type(self.encryption_param)} not valid.")
        self.feature_mapping = dict()

    def _sync_config(self):
        """Receive the configuration broadcast by the label trainer."""
        config = self.sync_channel.recv()
        return config

    def fit(self):
        """Compute the local block and serve/emit cross-party products."""
        logger.info("vertical pearson trainer start.")
        data = self._select_columns()
        # Use the row sample chosen by the label trainer.
        if self.sample_size is not None and self.sample_size < len(data):
            logger.info("sampled %d data." % self.sample_size)
            sample_ids = self.channels["sample_idx_com"].recv()
            data = data.iloc[sample_ids]
        data = data.apply(self.standardize)
        n = len(data)
        feature_names = data.columns.to_list()
        # Random aliases mask the real feature names from other parties.
        self.feature_mapping = self.string_encryption(feature_names)
        # On standardized columns, X^T X / n is the Pearson correlation.
        local_corr = np.dot(data.T, data)
        local_corr /= n
        self._local_summary["corr"] = local_corr
        self._local_summary["features"] = feature_names
        self._local_summary["num_features"] = {
            self.node_id: len(feature_names)
        }
        self._local_summary["feature_source"] = [self.node_id] * len(feature_names)
        self._local_summary["feature_mapping"] = self.feature_mapping
        self._summary[(self.node_id, self.node_id)] = local_corr
        # Flag protocol: True means the label trainer has nothing to send.
        feature_flag = self.channels["trainer_feature_com"].recv()
        # remote_corr = pd.DataFrame()
        j = 0
        local_mat = np.array([])
        if isinstance(self.encryption_param, PlainParam):
            local_mat = data.to_numpy()
        elif isinstance(self.encryption_param, PaillierParam):
            # Fixed-point encode so products stay integral under Paillier.
            local_mat = np.array(data.to_numpy() * 10 ** self.encryption_param.precision, dtype=int)
        # Answer each received label-trainer column with local_mat^T · column.
        while not feature_flag:
            other, feature_flag = self.channels["trainer_feature_com"].recv()
            remote_corr = np.dot(local_mat.T, other)
            self.channels["trainer_feature_com"].send(remote_corr)
            logger.info("trainer calculated {} feature from label_trainer.".format(j + 1))
            j += 1
        # Report the masked feature names to the label trainer.
        self.channels["trainer_feature_com"].send([self.feature_mapping[f] for f in feature_names])
        # Serve every earlier trainer: receive their columns, return products.
        for i in range(self.trainers.index(self.node_id)):
            trainer_id = self.trainers[i]
            flag = self.channels["trainer_com"][trainer_id].recv()
            j = 0
            corr_mat = []
            pack_nums = []
            while not flag:
                other, pack_num, flag = self.channels["trainer_com"][trainer_id].recv()
                remote_corr = np.dot(local_mat.T, other)
                # Ack so the sender proceeds with its next batch.
                self.channels["trainer_com"][trainer_id].send(True)
                corr_mat.append(remote_corr)
                pack_nums.append(pack_num)
                logger.info("trainer {} calculated {} feature from trainer {}.".format(self.node_id, j + 1, trainer_id))
                j += 1
            self._summary[(self.node_id, trainer_id)] = (np.array(corr_mat), pack_nums)
        # Send own columns to every later trainer.
        for j in range(self.trainers.index(self.node_id) + 1, len(self.trainers)):
            trainer_id = self.trainers[j]
            if len(feature_names):
                self.channels["trainer_com"][trainer_id].send(False)
            else:
                self.channels["trainer_com"][trainer_id].send(True)
            if isinstance(self.encryption_param, (PlainParam, type(None))):
                cnt = 0
                for f in feature_names:
                    encrypted_data = data[f].to_numpy()
                    if f != feature_names[-1]:
                        self.channels["trainer_com"][trainer_id].send((encrypted_data, 1, False))
                    else:
                        self.channels["trainer_com"][trainer_id].send((encrypted_data, 1, True))
                    cnt += 1
                    logger.info(
                        "trainer {} encrypted {} features to trainer {}.".format(
                            self.node_id, cnt, trainer_id))
                    # Wait for the receiver's ack before the next column.
                    self.channels["trainer_com"][trainer_id].recv()
            elif isinstance(self.encryption_param, PaillierParam):
                # Pack num_embed columns per plaintext before encrypting.
                for i in range(0, len(feature_names), self.num_embed):
                    embedded_data = embed(data.iloc[:, i:(i + self.num_embed)].to_numpy().T)
                    encrypted_data = Paillier.encrypt(
                        context=self.public_context,
                        data=embedded_data,
                        obfuscation=True,
                        num_cores=self.max_num_cores
                    )
                    if i + self.num_embed >= len(feature_names):
                        self.channels["trainer_com"][trainer_id].send(
                            (encrypted_data, len(feature_names) - i, True))
                        logger.info(
                            "trainer {} encrypted {} features to trainer {}.".format(
                                self.node_id, len(feature_names), trainer_id))
                    else:
                        self.channels["trainer_com"][trainer_id].send(
                            (encrypted_data, self.num_embed, False))
                        logger.info(
                            "trainer {} encrypted {} features to trainer {}.".format(
                                self.node_id, i + self.num_embed, trainer_id))
                    self.channels["trainer_com"][trainer_id].recv()
        # Hand the collected blocks to the label trainer for decryption/merge.
        self.channels["trainer_corr_com"].send(self._summary)
        self.save()

    def save(self):
        """Pickle the local summary (correlation, features, name mapping)."""
        save_dir = str(Path(self.output.get("path")))
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        model_name = self.output.get("corr")["name"]
        model_path = Path(save_dir, model_name)
        with open(model_path, 'wb') as f:
            pickle.dump(self._local_summary, f)
        logger.info("model saved as: {}.".format(model_path))
| 7,614 | 36.885572 | 108 | py |
XFL | XFL-master/python/algorithm/framework/vertical/feature_selection/base.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import pickle
from pathlib import Path
import pandas as pd
from common.utils.config_parser import TrainConfigParser
from common.utils.logger import logger
from common.model.python.feature_model_pb2 import WOEModel
from google.protobuf import json_format
from service.fed_job import FedJob
from service.fed_node import FedNode
class VerticalFeatureSelectionBase(TrainConfigParser):
    """Shared base for vertical feature selection.

    Loads the train/validation datasets and the upstream iv / correlation
    results that the concrete filters operate on.
    """

    def __init__(self, train_conf: dict, label: bool = False):
        """Parse config, load upstream results, then load the datasets.

        Args:
            train_conf: training configuration dict.
            label: whether this party holds the label; flipped to True if a
                loaded dataset declares a label column.
        """
        super().__init__(train_conf)
        self.label = label
        self.feature_info = []
        self.iv_result = None
        self.corr_result = None
        # Stages ("train"/"valid") that were loaded and must be transformed.
        self.transform_stages = []
        self.train_id, self.train_label, self.train_features = None, None, None
        self.val_id, self.val_label, self.val_features = None, None, None
        self._init_config()
        self._load_feature_info()
        self._init_data()

    def _init_data(self):
        """Load trainset and (optionally) valset; record loaded stages."""
        logger.info("init data loader.")
        if not self.input_trainset:
            return None
        self.train_id, self.train_label, self.train_features = self._load_data(self.input_trainset[0])
        self.transform_stages.append("train")
        if not self.input_valset:
            return None
        self.val_id, self.val_label, self.val_features = self._load_data(self.input_valset[0])
        self.transform_stages.append("valid")

    def _load_data(self, input_info):
        """Read one dataset (csv, local engine only).

        Args:
            input_info: dataset config dict with path/name/type and the
                has_id / has_label flags.

        Returns:
            Tuple of (index, label series or None, features dataframe).
        """
        file_path = str(Path(input_info.get("path"), input_info.get("name")))
        type_ = input_info.get("type", "None")
        if input_info.get("has_id", True):
            index_col = input_info.get("index_col", 'id')
        else:
            index_col = None
        if input_info.get("has_label", False):
            label_name = input_info.get("label_name", 'y')
            self.label = True
        else:
            label_name = None
        if type_ == "csv":
            if self.computing_engine == "local":
                df = pd.read_csv(file_path, index_col=index_col)
            # elif self.computing_engine == "spark":
            #     df = ps.read_csv(file_path, index_col=index_col)
            else:
                raise NotImplementedError("Computing engine {} is not supported.".format(self.computing_engine))
        else:
            raise NotImplementedError("Dataset type {} is not supported.".format(type_))
        if self.label:
            feature_cols = [_ for _ in df.columns if _ != label_name]
            features = df[feature_cols]
            if label_name:
                label = df[label_name]
            else:
                label = None
        else:
            features = df
            label = None
        ids = df.index
        return ids, label, features

    def _init_config(self):
        """Read the filter configuration from train_params."""
        params = self.train_info.get("train_params")
        self.filter = params.get("filter", {})

    def _parse_from_iv(self, params):
        """Load the iv result file and fill feature_info with (id, iv) pairs.

        The [JOB_ID]/[NODE_ID] placeholders in the path are resolved from
        the current federation job/node.
        """
        path = params["path"].replace("[JOB_ID]", str(FedJob.job_id)).replace("[NODE_ID]", str(FedNode.node_id))
        with open(Path(path, params["name"])) as f:
            res = json.load(f)
        # On the label side the file nests the values under the "iv" key.
        if self.label:
            res = res.get("iv")
        for k, v in res.items():
            self.feature_info.append({
                "feature_id": k,
                "iv": v
            })
        return res

    @staticmethod
    def _parse_corr_result(params):
        """Unpickle the correlation result produced by the pearson operator."""
        path = params["path"].replace("[JOB_ID]", str(FedJob.job_id)).replace("[NODE_ID]", str(FedNode.node_id))
        with open(Path(path, params["name"]), 'rb') as f:
            ret = pickle.load(f)
        return ret

    def _load_feature_info(self):
        """Load whichever upstream results (iv / correlation) are configured."""
        if "iv_result" in self.input:
            self.iv_result = self._parse_from_iv(self.input.get("iv_result"))
        if "corr_result" in self.input:
            self.corr_result = self._parse_corr_result(self.input.get("corr_result"))

    def _rewrite_model(self, features):
        """Rewrite the upstream WOE model in place, keeping only the bins of
        the selected features.

        Args:
            features: collection of feature names that survived selection.
        """
        if self.input.get("model"):
            params = self.input.get("model")
            path = params["path"].replace("[JOB_ID]", str(FedJob.job_id)).replace("[NODE_ID]", str(FedNode.node_id))
            file_path = Path(path, params["name"])
            with open(file_path, 'rb') as f:
                byte_str = f.read()
            woe = WOEModel()
            woe.ParseFromString(byte_str)
            d = json_format.MessageToDict(woe,
                                          including_default_value_fields=True,
                                          preserving_proto_field_name=True)
            # Keep only the binning entries whose feature was selected.
            ret = dict()
            for k, v in d["feature_binning"].items():
                if v["feature"] in features:
                    ret[k] = v
            woe = WOEModel()
            json_format.ParseDict({"feature_binning": ret}, woe)
            with open(file_path, "wb") as f:
                f.write(woe.SerializeToString())
            logger.info("rewrite model in {}.".format(file_path))
| 4,753 | 32.957143 | 107 | py |
XFL | XFL-master/python/algorithm/framework/vertical/feature_selection/label_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pickle
from pathlib import Path
import pandas as pd
from service.fed_control import ProgressCalculator
from common.checker.matcher import get_matched_config
from common.checker.x_types import All
from common.communication.gRPC.python.channel import BroadcastChannel
from common.utils.logger import logger
from .base import VerticalFeatureSelectionBase
class VerticalFeatureSelectionLabelTrainer(VerticalFeatureSelectionBase):
    """Label-trainer side of vertical feature selection.

    Applies the configured filters (iv threshold, cross-party correlation),
    broadcasts the surviving feature ids, saves the selection and writes
    the filtered datasets.
    """

    def __init__(self, train_conf: dict):
        """Broadcast the config to trainers and set up the channels.

        Args:
            train_conf: training configuration dict.
        """
        self.sync_channel = BroadcastChannel(name="sync")
        self._sync_config(train_conf)
        super().__init__(train_conf, label=True)
        # One progress step per configured filter.
        self.progress_calculator = ProgressCalculator(len(self.filter))
        self.channels = dict()
        self.channels["feature_id_com"] = BroadcastChannel(name="feature_id_com")

    def _common_filter(self, params):
        """Apply the basic per-metric filter (currently threshold on iv)."""
        logger.info("common_filter")
        metrics = params.get("metrics", 'iv')
        filter_method = params.get("filter_method", "threshold")
        if isinstance(metrics, str):
            metrics = [metrics]
        elif isinstance(params.get("metrics"), list):
            pass
        else:
            raise NotImplementedError("param metrics must be a string or a list.")
        for metric in metrics:
            if filter_method == "threshold":
                self.feature_info = self._threshold_filter(
                    metric,
                    params.get("threshold", 1)
                )

    def _sync_config(self, config):
        """Broadcast the config sections matched by the sync rule to trainers."""
        sync_rule = {
            "train_info": All()
        }
        config_to_sync = get_matched_config(config, sync_rule)
        self.sync_channel.broadcast(config_to_sync)

    def _threshold_filter(self, metric, threshold):
        """Drop features whose metric value is below the threshold.

        Args:
            metric: metric name; only "iv" is supported.
            threshold: minimum value a feature must reach to be kept.

        Returns:
            Filtered list of feature_info entries.
        """
        ret = []
        if metric == "iv":
            for r in self.feature_info:
                if r["iv"] < threshold:
                    logger.info("filter feature {} < threshold {}".format(r["feature_id"], threshold))
                    continue
                ret.append(r)
        else:
            raise NotImplementedError("metric {} is not supported".format(metric))
        return ret

    def _correlation_filter(self, params):
        """Greedy correlation filter: walk features by descending sort_metric
        and drop any other feature correlated above the threshold."""
        logger.info("correlation_filter")
        sort_metric = params.get("sort_metric", 'iv')
        correlation_threshold = params.get("correlation_threshold", 0.1)
        # Ask the trainers to translate our feature ids into the ids used
        # inside the correlation result.
        self.channels["feature_id_com"].broadcast([_["feature_id"] for _ in self.feature_info])
        mapping = {}
        for d in self.channels["feature_id_com"].collect():
            for k, v in d.items():
                mapping[k] = v
        corr = self.corr_result["corr"]
        features = self.corr_result["features"]
        res = []
        filtered_features = []
        # Highest-metric features get priority to survive.
        self.feature_info.sort(key=lambda x: x[sort_metric], reverse=True)
        for r in self.feature_info:
            f = mapping.get(r["feature_id"], r["feature_id"])
            if f in features:
                i = features.index(f)
            else:
                continue
            if features[i] in filtered_features:
                continue
            # NOTE: the loop variable f shadows the lookup above; it is not
            # used after this loop.
            for f, s in zip(features, corr[i]):
                if f == features[i]:
                    continue
                elif f in filtered_features:
                    continue
                elif abs(s) > correlation_threshold:
                    logger.info(
                        "current feature {}, filtered feature {}.".format(
                            features[i], f
                        )
                    )
                    filtered_features.append(f)
            res.append({
                "feature_id": mapping.get(r["feature_id"], r["feature_id"]),
                "iv": r["iv"]
            })
        self.feature_info = res

    def fit(self):
        """Run all configured filters, publish survivors, save and transform."""
        logger.info("feature selection label trainer start.")
        # iter_ is used to calculate the progress of the training
        iter_ = 0
        for k, v in self.filter.items():
            iter_ += 1
            if k == "common":
                self._common_filter(v)
            elif k == "correlation":
                self._correlation_filter(v)
            else:
                raise NotImplementedError("method {} is not implemented.".format(k))
            # calculate and update the progress of the training
            self.progress_calculator.cal_custom_progress(iter_)
        # Tell every party which feature ids survived the selection.
        self.channels["feature_id_com"].broadcast([_["feature_id"] for _ in self.feature_info])
        self.save()
        self.transform()
        ProgressCalculator.finish_progress()

    def transform(self):
        """Write the loaded stages, restricted to the selected features, to csv."""
        if not self.transform_stages:
            return None
        selected_features = [_["feature_id"] for _ in self.feature_info]
        # Preserve the original column order of the dataframe.
        selected_features = [_ for _ in self.train_features.columns if _ in selected_features]
        if "train" in self.transform_stages:
            self.train_features = self.train_features[selected_features]
            if self.train_label is not None:
                df = pd.concat([self.train_label, self.train_features], axis=1).set_index(self.train_id)
            else:
                df = self.train_features.set_index(self.train_id)
            output_train_path = Path(self.output.get("path"), self.output["trainset"].get("name"))
            if not os.path.exists(os.path.dirname(output_train_path)):
                os.makedirs(os.path.dirname(output_train_path))
            df.to_csv(output_train_path, header=True, index=True, index_label="id", float_format="%.6g")
        if "valid" in self.transform_stages:
            self.val_features = self.val_features[selected_features]
            if self.val_label is not None:
                df = pd.concat([self.val_label, self.val_features], axis=1).set_index(self.val_id)
            else:
                df = self.val_features.set_index(self.val_id)
            output_val_path = Path(self.output.get("path"), self.output["valset"].get("name"))
            if not os.path.exists(os.path.dirname(output_val_path)):
                os.makedirs(os.path.dirname(output_val_path))
            df.to_csv(output_val_path, header=True, index=True, index_label="id", float_format="%.6g")

    def save(self):
        """Pickle the selected feature list and prune the upstream WOE model."""
        save_dir = str(Path(self.output.get("path")))
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        model_name = self.output.get("model")["name"]
        model_path = Path(save_dir, model_name)
        features = [_["feature_id"] for _ in self.feature_info]
        output = {
            "features": features,
            "num_of_features": len(self.feature_info)
        }
        with open(model_path, 'wb') as f:
            pickle.dump(output, f)
        logger.info("model saved as: {}.".format(model_path))
        self._rewrite_model(features)
| 6,269 | 32.174603 | 95 | py |
XFL | XFL-master/python/algorithm/framework/vertical/feature_selection/__init__.py | 0 | 0 | 0 | py | |
XFL | XFL-master/python/algorithm/framework/vertical/feature_selection/trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pickle
from pathlib import Path
import pandas as pd
from common.communication.gRPC.python.channel import BroadcastChannel
from common.utils.logger import logger
from common.utils.utils import update_dict
from .base import VerticalFeatureSelectionBase
class VerticalFeatureSelectionTrainer(VerticalFeatureSelectionBase):
    def __init__(self, train_conf: dict):
        """Receive the synced config from the label trainer and set up channels.

        Args:
            train_conf: local training configuration; updated in place with
                the config broadcast by the label trainer before parsing.
        """
        self.sync_channel = BroadcastChannel(name="sync")
        conf = self._sync_config()
        update_dict(train_conf, conf)
        super().__init__(train_conf, label=False)
        self.channels = dict()
        self.channels["feature_id_com"] = BroadcastChannel(name="feature_id_com")
        # Maps local feature names to their anonymized ids.
        self.feature_mapping = dict()
def _sync_config(self):
config = self.sync_channel.recv()
return config
def _common_filter(self, params):
metrics = params.get("metrics", 'iv')
if isinstance(metrics, str):
metrics = [metrics]
elif isinstance(params.get("metrics"), list):
pass
else:
raise NotImplementedError("param metrics must be a string or a list.")
for metric in metrics:
if metric == "iv":
for k, v in self.iv_result["feature_mapping"].items():
self.feature_mapping[k] = v
    def _correlation_filter(self):
        """Answer the label trainer's id query.

        Receives the anonymized feature ids, maps those belonging to this
        party onto the ids used in the local correlation result, sends the
        mapping back, then switches to the correlation result's mapping.
        """
        feature_id_list = self.channels["feature_id_com"].recv()
        mapping = {}
        # Invert local name -> anonymized id so ids can be looked up.
        reversed_feature_mapping = dict()
        for k, v in self.feature_mapping.items():
            reversed_feature_mapping[v] = k
        for feature_id in feature_id_list:
            if feature_id in reversed_feature_mapping:
                local_feature_name = reversed_feature_mapping.get(feature_id)
                if local_feature_name in self.corr_result["feature_mapping"]:
                    mapping[feature_id] = self.corr_result["feature_mapping"][local_feature_name]
            else:
                continue
        self.channels["feature_id_com"].send(mapping)
        # From here on use the ids defined by the correlation result.
        self.feature_mapping = self.corr_result["feature_mapping"]
def fit(self):
logger.info("feature selection trainer start.")
for k, v in self.filter.items():
if k == "common":
self._common_filter(v)
elif k == "correlation":
self._correlation_filter()
else:
raise NotImplementedError("method {} is not implemented.".format(k))
remain_id_list = self.channels["feature_id_com"].recv()
res = dict()
for k, v in self.feature_mapping.items():
if v in remain_id_list:
res[k] = v
self.feature_mapping = res
self.save()
self.transform()
def transform(self):
if not self.transform_stages:
return None
selected_features = [_ for _ in self.feature_mapping]
if "train" in self.transform_stages:
self.train_features = self.train_features[selected_features]
if self.train_label is not None:
df = pd.concat([self.train_label, self.train_features], axis=1).set_index(self.train_id)
else:
df = self.train_features.set_index(self.train_id)
output_train_path = Path(self.output.get("path"), self.output["trainset"].get("name"))
if not os.path.exists(os.path.dirname(output_train_path)):
os.makedirs(os.path.dirname(output_train_path))
df.to_csv(output_train_path, header=True, index=True, index_label="id", float_format="%.6g")
if "valid" in self.transform_stages:
self.val_features = self.val_features[selected_features]
if self.val_label is not None:
df = pd.concat([self.val_label, self.val_features], axis=1).set_index(self.val_id)
else:
df = self.val_features.set_index(self.val_id)
output_val_path = Path(self.output.get("path"), self.output["valset"].get("name"))
if not os.path.exists(os.path.dirname(output_val_path)):
os.makedirs(os.path.dirname(output_val_path))
df.to_csv(output_val_path, header=True, index=True, index_label="id", float_format="%.6g")
def save(self):
save_dir = str(Path(self.output.get("path")))
if not os.path.exists(save_dir):
os.makedirs(save_dir)
model_name = self.output.get("model")["name"]
model_path = Path(save_dir, model_name)
features = list(self.feature_mapping.keys())
with open(model_path, 'wb') as f:
pickle.dump(self.feature_mapping, f)
logger.info("model saved as: {}.".format(model_path))
self._rewrite_model(features)
| 4,683 | 33.441176 | 95 | py |
XFL | XFL-master/python/algorithm/framework/vertical/poisson_regression/base.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset
from algorithm.core.data_io import CsvReader
from algorithm.framework.vertical.vertical_model_base import VerticalModelBase
from common.utils.logger import logger
from common.utils.model_preserver import ModelPreserver
from service.fed_config import FedConfig
class VerticalPoissonRegression(nn.Module):
    """Single linear layer producing the raw (log-rate) score for Poisson regression.

    Autograd is switched off on the layer because the federated protocol
    updates the weights manually (see the label trainer) rather than via
    backpropagation.
    """

    def __init__(self, input_dim: int, bias: bool = False):
        super().__init__()
        layer = nn.Linear(input_dim, 1, bias=bias)
        layer.requires_grad_(False)
        # NOTE: attribute name "linear" is read externally (weight updates),
        # so it must not change.
        self.linear = layer

    def forward(self, x):
        score = self.linear(x)
        return score
class VerticalPoissonRegressionBase(VerticalModelBase):
    """Shared base for vertical Poisson regression parties.

    Parses the training config, builds the model and the train/val
    dataloaders. The assist trainer holds no data, so it skips dataloader
    initialization.
    """

    def __init__(self, train_conf: dict, label: bool = False, *args, **kwargs):
        """
        Args:
            train_conf (dict): training configuration.
            label (bool, optional): whether this party holds the label.
                Defaults to False.
        """
        super().__init__(train_conf)
        self._parse_config()
        self.train_conf = train_conf
        self.label = label
        self.data_dim = None
        self.model = None
        self.train_dataloader, self.eval_dataloader = None, None
        # The assist trainer owns no data; only label trainer / trainers load.
        if FedConfig.node_id != "assist_trainer":
            self._init_dataloader()

    def _parse_config(self) -> None:
        """Extract model/output/training/encryption settings from the config."""
        super()._parse_config()
        self.model_name = self.model_info.get("name")
        self.save_model_name = self.output.get("model", {}).get("name")
        if not os.path.exists(self.save_dir):
            os.makedirs(self.save_dir)
        self.global_epoch = self.train_params.get("global_epoch")
        self.batch_size = self.train_params.get("batch_size")
        self.encryption_config = self.train_params.get("encryption")
        self.optimizer_config = self.train_params.get("optimizer")
        self.pretrain_model_path = self.input.get("pretrained_model", {}).get("path")
        self.random_seed = self.train_params.get("random_seed", None)
        self.early_stopping_config = self.train_params.get("early_stopping")
        self.save_frequency = self.interaction_params.get("save_frequency")

    @staticmethod
    def set_seed(seed):
        # Seed both CPU and CUDA RNGs; the same seed is shared across parties
        # so that shuffled batches stay aligned.
        torch.manual_seed(seed)
        torch.cuda.manual_seed(seed)

    def _init_model(self, bias: bool = False) -> None:
        """
        Init poisson regression model (optionally loading pretrained weights).

        Returns: None
        """
        logger.info("Init model start.")
        self.model = VerticalPoissonRegression(input_dim=self.data_dim, bias=bias)
        # Load pretrained model if needed.
        if self.pretrain_model_path is not None and self.pretrain_model_path != "":
            checkpoint = ModelPreserver.load(os.path.join(self.pretrain_model_path, self.input.get(
                "pretrained_model").get("name", None)))
            self.model.load_state_dict(checkpoint["state_dict"])
        logger.info("Init model completed.")

    def __load_data(self, config) -> CsvReader:
        """Build a CsvReader for the first dataset entry; only csv supported."""
        config = config[0]
        if config["type"] == "csv":
            data_reader = CsvReader(path=os.path.join(config["path"], config["name"]), has_id=config["has_id"],
                                    has_label=config["has_label"])
        else:
            raise NotImplementedError("Dataset type {} is not supported.".format(config["type"]))
        return data_reader

    def _init_data(self) -> None:
        """Load train/valid features, labels and positional row ids."""
        if len(self.input_trainset) > 0:
            data: CsvReader = self.__load_data(self.input_trainset)
            self.train = data.features()
            self.train_label = data.label()
            # ids are positional indices, not the raw id column values
            self.train_ids = list(range(len(data.ids)))
        else:
            raise NotImplementedError("Trainset was not configured.")
        if self.label:
            assert len(self.train) == len(self.train_label)
        if len(self.input_valset) > 0:
            data: CsvReader = self.__load_data(self.input_valset)
            self.val = data.features()
            self.val_label = data.label()
            self.val_ids = list(range(len(data.ids)))
            if self.label:
                assert len(self.val) == len(self.val_label)

    def _init_dataloader(self) -> None:
        """
        Build train/val dataloaders from the raw data.

        The label party batches (features, label, id); trainer parties batch
        (features, id) only. Train loaders shuffle — alignment across parties
        relies on the shared random seed (see set_seed).
        """
        logger.info("Dataloader initiation start.")
        self._init_data()
        if self.label:
            self.train_dataloader = DataLoader(
                dataset=TensorDataset(torch.tensor(self.train, dtype=torch.float32),
                                      torch.unsqueeze(torch.tensor(self.train_label), dim=-1),
                                      torch.unsqueeze(torch.tensor(self.train_ids), dim=-1)),
                batch_size=self.batch_size, shuffle=True
            )
            self.val_dataloader = DataLoader(
                dataset=TensorDataset(torch.tensor(self.val, dtype=torch.float32),
                                      torch.unsqueeze(torch.tensor(self.val_label), dim=-1),
                                      torch.unsqueeze(torch.tensor(self.val_ids), dim=-1)),
                batch_size=self.batch_size, shuffle=False
            )
            self.data_dim = torch.tensor(self.train).shape[-1]
            logger.info("Train data shape: {}.".format(list(torch.tensor(self.train).shape)))
        else:
            self.train_dataloader = DataLoader(
                dataset=TensorDataset(torch.tensor(self.train, dtype=torch.float32),
                                      torch.unsqueeze(torch.tensor(self.train_ids), dim=-1)),
                batch_size=self.batch_size, shuffle=True
            )
            self.val_dataloader = DataLoader(
                dataset=TensorDataset(torch.tensor(self.val, dtype=torch.float32),
                                      torch.unsqueeze(torch.tensor(self.val_ids), dim=-1)),
                batch_size=self.batch_size, shuffle=False
            )
            self.data_dim = torch.tensor(self.train).shape[-1]
            logger.info("Train data shape: {}.".format(list(torch.tensor(self.train).shape)))
        logger.info("Dataloader initiation completed.")
| 6,716 | 41.783439 | 111 | py |
XFL | XFL-master/python/algorithm/framework/vertical/poisson_regression/label_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import secrets
import random
from functools import reduce
from pathlib import Path
from common.checker.x_types import All
import numpy as np
import pandas as pd
import tenseal as ts
import torch
from common.checker.matcher import get_matched_config
from common.communication.gRPC.python.channel import BroadcastChannel, DualChannel
from common.crypto.paillier.paillier import Paillier
from common.utils.algo_utils import earlyStopping
from common.utils.logger import logger
from common.utils.model_preserver import ModelPreserver
from common.utils.utils import save_model_config
from service.fed_config import FedConfig
from service.fed_node import FedNode
from service.fed_control import ProgressCalculator
from .base import VerticalPoissonRegressionBase
class VerticalPoissonRegressionLabelTrainer(VerticalPoissonRegressionBase):
    """Label-holding party of vertical federated Poisson regression.

    Drives training: aggregates (optionally homomorphically encrypted)
    partial scores from the trainer parties, computes the loss and the
    residual d = exp(score) - y, and updates its own linear weights with
    gradients decrypted (after random masking) by the assist trainer.
    """

    def __init__(self, train_conf: dict, *args, **kwargs):
        """
        Vertical Poisson Regression

        Args:
            train_conf: training parameters
            *args:
            **kwargs:
        """
        self.sync_channel = BroadcastChannel(name="sync")
        self._sync_config(train_conf)
        super().__init__(train_conf, label=True, *args, **kwargs)
        # Share one random seed so all parties shuffle batches identically.
        if self.random_seed is None:
            self.random_seed = random.randint(-(1 << 32), 1 << 32)
        self.sync_channel.broadcast(self.random_seed)
        self.set_seed(self.random_seed)
        self.progress_calculator = ProgressCalculator(self.global_epoch, len(self.train_dataloader))
        # Only the label party's model has a bias term.
        self._init_model(bias=True)
        # Config used later by save_model_config for inference.
        self.export_conf = [{
            "class_name": "VerticalPoissonRegression",
            "identity": self.identity,
            "filename": self.save_model_name,
            "input_dim": self.data_dim,
            "bias": True,
            "version": "1.4.0"
        }]
        self.es = earlyStopping(key=self.early_stopping_config["key"],
                                patience=self.early_stopping_config["patience"],
                                delta=self.early_stopping_config["delta"])
        self.best_model = None
        self.broadcast_channel = BroadcastChannel(name="Public keys", root_id=FedConfig.get_assist_trainer())
        # One dual channel per trainer party, plus one to the assist trainer.
        self.dual_channels = {"intermediate_label_trainer": {}, "gradients_loss": None}
        self.trainers = FedConfig.get_trainer()
        for party_id in self.trainers:
            self.dual_channels["intermediate_label_trainer"][party_id] = DualChannel(
                name="intermediate_label_trainer_" + party_id, ids=[FedConfig.node_id, party_id])
        self.dual_channels["gradients_loss"] = DualChannel(name="gradients_loss_" + FedConfig.node_id,
                                                           ids=[FedConfig.get_assist_trainer()] + [FedConfig.node_id])
        self.train_result = None
        self.val_result = None
        # Push training meta info the assist trainer needs before fit().
        self.dual_channels["gradients_loss"].send(len(self.train_dataloader))
        self.dual_channels["gradients_loss"].send(self.global_epoch)
        self.dual_channels["gradients_loss"].send(self.batch_size)
        self.encryption_method = list(self.encryption_config.keys())[0].lower()
        self.dual_channels["gradients_loss"].send(self.encryption_config)
        self.dual_channels["gradients_loss"].send(self.encryption_method)

    def _sync_config(self, config):
        """Broadcast the train_info section of the config to the other parties."""
        sync_rule = {
            "train_info": All()
        }
        config_to_sync = get_matched_config(config, sync_rule)
        self.sync_channel.broadcast(config_to_sync)

    def predict(self, input_data):
        """Aggregate all parties' partial scores over *input_data* (plaintext).

        Returns:
            Tuple of lists: (labels, predicted rates exp(score), raw scores).
        """
        pred_prob_epoch, y_epoch, pred_tmp_epoch = [], [], []
        for batch_idx, (x_batch, y_batch, _) in enumerate(input_data):
            pred_trainer_list = []
            pre_tmp = self.model(x_batch)
            # receive intermediate results from trainers
            for party_id in FedConfig.get_trainer():
                pred_trainer_list.append(self.dual_channels["intermediate_label_trainer"][party_id].recv(
                    use_pickle=True))
            # calculate prediction of batch and tmp_pred of batch
            pred_tmp_total = pre_tmp.numpy().astype(np.float32).flatten() + reduce(
                lambda x, y: x + y, pred_trainer_list)
            pred_total = np.exp(pred_tmp_total)
            # calculate prediction of epoch
            pred_prob_epoch += pred_total.tolist()
            pred_tmp_epoch += pred_tmp_total.tolist()
            y_epoch += y_batch.numpy().astype(np.float32).flatten().tolist()
        return y_epoch, pred_prob_epoch, pred_tmp_epoch

    def fit(self):
        """Run the full encrypted training loop.

        Per batch: aggregate trainers' exp(score) products and score sums,
        compute the (encrypted) Poisson NLL plus regularization, obtain the
        decrypted loss from the assist trainer, distribute the residual d to
        trainers, then update local weights from the (noise-masked, assist-
        decrypted) gradients. Per epoch: metrics, early stopping, checkpoints.
        """
        self.check_data()
        public_context = None
        num_cores = -1
        rng = secrets.SystemRandom()
        logger.info("Vertical poisson regression training start")
        # receive encryption key from assist trainer
        if self.encryption_method == "ckks":
            logger.info("Receive ckks public key.")
            public_context = self.broadcast_channel.recv(use_pickle=False)
            public_context = ts.context_from(public_context)
            logger.info("Public key received.")
        elif self.encryption_method == "paillier":
            logger.info("Receive paillier public key.")
            public_context = self.broadcast_channel.recv(use_pickle=False)
            public_context = Paillier.context_from(public_context)
            logger.info("Public key received.")
        elif self.encryption_method == "plain":
            pass
        else:
            raise ValueError(f"Encryption method {self.encryption_method} not supported! Valid methods are "
                             f"'paillier', 'ckks', 'plain'.")
        # train
        for epoch in range(1, self.global_epoch + 1):
            loss_epoch = 0
            for batch_idx, (x_batch, y_batch, _) in enumerate(self.train_dataloader):
                regular_loss_tmp = 0
                regular_gradient_tmp = 0
                enc_regular_gradient_tmp = 0
                trainer_exp = None
                # calculate regular results (L1 when p == 1, L2 when p == 2)
                # NOTE(review): when p == 0 these stay the int 0, and the later
                # .numpy() calls on them would raise — confirm p == 0 is unused.
                if self.optimizer_config['p'] == 1:
                    regular_loss_tmp = torch.abs(self.model.linear.weight).sum() * self.optimizer_config['alpha']
                    # |w| / w is the sign of w (subgradient of the L1 term)
                    regular_gradient_tmp = self.optimizer_config['alpha'] * (torch.abs(self.model.linear.weight)
                                                                             / self.model.linear.weight)
                elif self.optimizer_config['p'] == 2:
                    regular_loss_tmp = (self.model.linear.weight ** 2).sum() * self.optimizer_config['alpha'] / 2
                    regular_gradient_tmp = self.optimizer_config['alpha'] * self.model.linear.weight
                elif self.optimizer_config['p'] == 0:
                    pass
                # receive intermediate exp results from trainers and compute total_exp
                # (trainers chain-multiply their exp(score); the last trainer
                # sends the accumulated product)
                logger.info("Calculate predicted exp result of all trainers.")
                pred_label_tmp = self.model(x_batch)
                pred_label_trainer = torch.exp(pred_label_tmp)
                if self.encryption_method == "ckks":
                    trainer_exp = self.dual_channels["intermediate_label_trainer"][self.trainers[-1]].recv(
                        use_pickle=False)
                    trainer_exp = ts.ckks_vector_from(public_context, trainer_exp)
                elif self.encryption_method == "paillier":
                    trainer_exp = self.dual_channels["intermediate_label_trainer"][self.trainers[-1]].recv(
                        use_pickle=False)
                    trainer_exp = Paillier.ciphertext_from(public_context, trainer_exp)
                elif self.encryption_method == "plain":
                    trainer_exp = self.dual_channels["intermediate_label_trainer"][self.trainers[-1]].recv()
                total_exp = trainer_exp * pred_label_trainer.numpy().astype(np.float32).flatten()
                # receive immediate results from trainers
                logger.info("Calculate predicted result of all trainers.")
                if self.encryption_method == "ckks":
                    total_sum = ts.ckks_vector(public_context, pred_label_tmp.numpy().astype(np.float32).flatten())
                else:
                    total_sum = pred_label_tmp.numpy().astype(np.float32).flatten()
                for party_id in self.trainers:
                    if self.encryption_method == "ckks":
                        total_sum = total_sum + ts.ckks_vector_from(public_context, self.dual_channels[
                            "intermediate_label_trainer"][party_id].recv(use_pickle=False))
                        trainer_regular_loss = ts.ckks_vector_from(public_context, self.dual_channels[
                            "intermediate_label_trainer"][party_id].recv(use_pickle=False))
                    elif self.encryption_method == "paillier":
                        total_sum = total_sum + Paillier.ciphertext_from(public_context, self.dual_channels[
                            "intermediate_label_trainer"][party_id].recv(use_pickle=False))
                        trainer_regular_loss = Paillier.ciphertext_from(public_context, self.dual_channels[
                            "intermediate_label_trainer"][party_id].recv(use_pickle=False))
                    elif self.encryption_method == "plain":
                        total_sum = total_sum + self.dual_channels["intermediate_label_trainer"][party_id].recv()
                        trainer_regular_loss = self.dual_channels["intermediate_label_trainer"][party_id].recv()
                # calculate total loss: exp(score) - y * score (+ regularizers)
                logger.info("Calculate total loss.")
                enc_loss = total_exp - total_sum * y_batch.numpy().astype(np.float32).flatten()
                if self.encryption_method == "ckks":
                    regular_loss_tmp = ts.ckks_vector(public_context,
                                                      regular_loss_tmp.numpy().astype(np.float32).flatten())
                else:
                    regular_loss_tmp = regular_loss_tmp.numpy().astype(np.float32).flatten()
                enc_loss_batch = enc_loss + regular_loss_tmp + trainer_regular_loss
                # send total loss to assist_trainer
                logger.info("Send encrypted total loss to assist_trainer.")
                if self.encryption_method == "ckks":
                    self.dual_channels["gradients_loss"].send(enc_loss_batch.serialize(), use_pickle=False)
                elif self.encryption_method == "paillier":
                    self.dual_channels["gradients_loss"].send(Paillier.serialize(enc_loss_batch), use_pickle=False)
                elif self.encryption_method == "plain":
                    self.dual_channels["gradients_loss"].send(enc_loss_batch)
                # receive decrypted loss from assist_trainer
                logger.info("Receive decrypted total loss from assist_trainer.")
                loss_batch = self.dual_channels["gradients_loss"].recv()
                loss_batch = loss_batch / x_batch.shape[0]
                logger.info("Loss of {} batch is {}".format(batch_idx, loss_batch))
                loss_epoch += loss_batch * x_batch.shape[0]
                # calculate intermediate result d = exp(total score) - y
                logger.info("Calculate intermediate result d.")
                enc_y = None
                if self.encryption_method == "ckks":
                    enc_y = ts.ckks_vector(public_context, y_batch.numpy().astype(np.float32).flatten())
                elif self.encryption_method == "paillier":
                    enc_y = Paillier.encrypt(public_context, y_batch.numpy().astype(np.float32).flatten(),
                                             precision=self.encryption_config[self.encryption_method][
                                                 "precision"], obfuscation=True, num_cores=num_cores)
                elif self.encryption_method == "plain":
                    enc_y = y_batch.numpy().astype(np.float32).flatten()
                enc_d = total_exp - enc_y
                # send intermediate result d to trainers
                logger.info("Send intermediate result d to trainers.")
                for party_id in self.trainers:
                    if self.encryption_method == "ckks":
                        self.dual_channels["intermediate_label_trainer"][party_id].send(enc_d.serialize(),
                                                                                        use_pickle=False)
                    elif self.encryption_method == "paillier":
                        self.dual_channels["intermediate_label_trainer"][party_id].send(Paillier.serialize(enc_d),
                                                                                        use_pickle=False)
                    elif self.encryption_method == "plain":
                        self.dual_channels["intermediate_label_trainer"][party_id].send(enc_d)
                # calculate gradient for label_trainer: grad_w = d @ X + reg
                logger.info("Calculate gradients for label_trainer.")
                if self.encryption_method == "ckks":
                    enc_regular_gradient_tmp = ts.ckks_vector(public_context,
                                                              regular_gradient_tmp.numpy().astype(np.float32).flatten())
                elif self.encryption_method == "paillier":
                    enc_regular_gradient_tmp = Paillier.encrypt(
                        public_context, regular_gradient_tmp.numpy().astype(np.float32).flatten(),
                        precision=self.encryption_config[self.encryption_method]["precision"],
                        obfuscation=True, num_cores=num_cores)
                elif self.encryption_method == "plain":
                    enc_regular_gradient_tmp = regular_gradient_tmp.numpy().astype(np.float32).flatten()
                if self.encryption_method == "ckks":
                    gradient_label_trainer_w = enc_d.matmul(x_batch.numpy()) + enc_regular_gradient_tmp
                else:
                    gradient_label_trainer_w = np.matmul(enc_d.reshape(1, len(enc_d)), x_batch.numpy()
                                                         ) + enc_regular_gradient_tmp
                gradient_label_trainer_b = enc_d
                if self.encryption_method == "ckks":
                    # add noise to encrypted gradients and send to assist_trainer
                    # (mask in ~[-335.5, 335.5] so the decryptor never sees
                    # the true gradient)
                    logger.info("Calculate noised gradients for label_trainer.")
                    noise = np.array([rng.randint(1 << 24, 1 << 26) - (1 << 25) for _ in range(x_batch.shape[1])],
                                     dtype=np.float32)
                    noise /= 100000
                    noise_b = np.array([rng.randint(1 << 24, 1 << 26) - (1 << 25) for _ in range(x_batch.shape[0])],
                                       dtype=np.float32)
                    noise_b /= 100000
                    noised_gradient_label_trainer_w = gradient_label_trainer_w + noise
                    noised_gradient_label_trainer_b = gradient_label_trainer_b + noise_b
                    logger.info("Send noised gradient to assist_trainer.")
                    self.dual_channels["gradients_loss"].send(noised_gradient_label_trainer_w.serialize(),
                                                              use_pickle=False)
                    self.dual_channels["gradients_loss"].send(noised_gradient_label_trainer_b.serialize(),
                                                              use_pickle=False)
                    # receive decrypted gradient from assist_trainer
                    logger.info("Receive decrypted gradient from assist_trainer.")
                    noised_decrypt_gradient = self.dual_channels["gradients_loss"].recv()
                    noised_decrypt_gradient_label_trainer_w = noised_decrypt_gradient["noised_gradient_label_trainer_w"]
                    noised_decrypt_gradient_label_trainer_b = noised_decrypt_gradient["noised_gradient_label_trainer_b"]
                    # strip the mask (b was summed on the assist side, so
                    # subtract the summed mask)
                    gradient_label_trainer_w = noised_decrypt_gradient_label_trainer_w - noise
                    gradient_label_trainer_b = noised_decrypt_gradient_label_trainer_b - np.sum(noise_b)
                elif self.encryption_method == "paillier":
                    # add noise to encrypted gradients and send to assist_trainer
                    logger.info("Calculate noised gradients for label_trainer.")
                    noise = np.array([rng.randint(1 << 24, 1 << 26) - (1 << 25) for _ in range(x_batch.shape[1])],
                                     dtype=np.float32)
                    noise /= 100000
                    noise_b = np.array([rng.randint(1 << 24, 1 << 26) - (1 << 25) for _ in range(x_batch.shape[0])],
                                       dtype=np.float32)
                    noise_b /= 100000
                    noised_gradient_label_trainer_w = gradient_label_trainer_w + noise
                    noised_gradient_label_trainer_b = gradient_label_trainer_b + noise_b
                    logger.info("Send noised gradient to assist_trainer.")
                    self.dual_channels["gradients_loss"].send(Paillier.serialize(noised_gradient_label_trainer_w),
                                                              use_pickle=False)
                    self.dual_channels["gradients_loss"].send(Paillier.serialize(noised_gradient_label_trainer_b),
                                                              use_pickle=False)
                    # receive decrypted gradient from assist_trainer
                    logger.info("Receive decrypted gradient from assist_trainer.")
                    noised_decrypt_gradient = self.dual_channels["gradients_loss"].recv()
                    noised_decrypt_gradient_label_trainer_w = noised_decrypt_gradient["noised_gradient_label_trainer_w"]
                    noised_decrypt_gradient_label_trainer_b = noised_decrypt_gradient["noised_gradient_label_trainer_b"]
                    gradient_label_trainer_w = noised_decrypt_gradient_label_trainer_w - noise
                    gradient_label_trainer_b = noised_decrypt_gradient_label_trainer_b - np.sum(noise_b)
                elif self.encryption_method == "plain":
                    # no assist round-trip needed; just reduce d to a scalar
                    gradient_label_trainer_b = gradient_label_trainer_b.sum()
                # update w and b of label_trainer (plain SGD step)
                gradient_label_trainer_w = gradient_label_trainer_w / x_batch.shape[0]
                gradient_label_trainer_b = gradient_label_trainer_b / x_batch.shape[0]
                logger.info("Update weights of label trainer.")
                self.model.linear.weight -= (torch.FloatTensor(gradient_label_trainer_w) * self.optimizer_config["lr"])
                self.model.linear.bias -= (gradient_label_trainer_b * self.optimizer_config["lr"])
                # calculate and update the progress of the training
                self.progress_calculator.cal_custom_progress(epoch, batch_idx+1)
            loss_epoch = loss_epoch * (1 / len(self.train))
            logger.info("Loss of {} epoch is {}".format(epoch, loss_epoch))
            # predict train and val results for metrics
            logger.info("Predict train weights of label trainer.")
            self.train_result = self.predict(self.train_dataloader)
            loss_train_met = {"loss": loss_epoch}
            self._calc_metrics(np.array(self.train_result[1], dtype=float), np.array(self.train_result[0]),
                               epoch, stage="train", loss=loss_train_met)
            logger.info("Predict val weights of label trainer.")
            self.val_result = self.predict(self.val_dataloader)
            loss_val = np.mean(
                np.array(self.val_result[1]) - np.array(self.val_result[0]) * np.array(self.val_result[2]))
            loss_val_met = {"loss": loss_val}  # no regular
            val_metrics = self._calc_metrics(np.array(self.val_result[1], dtype=float), np.array(self.val_result[0]),
                                             epoch, stage="val", loss=loss_val_met)
            # early stopping (negate: earlyStopping maximizes its key)
            val_metrics["loss"] = - val_metrics["loss"]
            if self.early_stopping_config["patience"] > 0:
                early_stop_flag, best_model_flag = self.es(val_metrics)
            else:
                early_stop_flag, best_model_flag = False, True
            # update best model
            if best_model_flag:
                self.best_model = copy.deepcopy(self.model)
            # send flags to trainers
            for party_id in FedConfig.get_trainer():
                self.dual_channels["intermediate_label_trainer"][party_id].send(
                    [early_stop_flag, best_model_flag, self.early_stopping_config["patience"]], use_pickle=True)
            # if need to save results by epoch
            if self.save_frequency > 0 and epoch % self.save_frequency == 0:
                ModelPreserver.save(save_dir=self.save_dir, model_name=self.save_model_name,
                                    state_dict=self.model.state_dict(), epoch=epoch)
            # if early stopping, break
            if early_stop_flag:
                # update the progress of 100 to show the training is finished
                ProgressCalculator.finish_progress()
                break
        # save model for infer
        save_model_config(stage_model_config=self.export_conf, save_path=Path(self.save_dir))
        # if not early stopping, save probabilities and model
        self._save_prob()
        ModelPreserver.save(save_dir=self.save_dir, model_name=self.save_model_name,
                            state_dict=self.best_model.state_dict(), final=True)
        # calculate feature importance
        self._save_feature_importance(self.dual_channels)

    def _save_prob(self):
        """Optionally write final train/val predictions per the config flags."""
        if self.interaction_params.get("write_training_prediction"):
            self._write_prediction(self.train_result[1], self.train_result[0], self.train_ids,
                                   stage="train", final=True)
        if self.interaction_params.get("write_validation_prediction"):
            self._write_prediction(self.val_result[1], self.val_result[0], self.val_ids,
                                   stage="val", final=True)

    def check_data(self):
        """Abort if the total feature count across all parties is zero."""
        dim_channel = BroadcastChannel(name="check_data_com", ids=[FedConfig.node_id] + FedConfig.get_trainer())
        n = self.data_dim
        dims = dim_channel.collect()
        for dim in dims:
            n += dim
        if n <= 0:
            raise ValueError("Number of the feature is zero. Stop training.")

    def _save_feature_importance(self, channel):
        """Collect per-party weights and write |weight|-ranked importance csv.

        Each trainer sends (owner_id, weights); local weights come from the
        best model. Importance here is simply the signed weight value.
        """
        res = {"owner_id": [], "fid": [], "importance": []}
        other_weight_list = []
        for party_id in FedConfig.get_trainer():
            other_weight_list.append(channel["intermediate_label_trainer"][party_id].recv(use_pickle=True))
        for (owner_id, weights) in other_weight_list:
            for fid, weight in enumerate(weights):
                res["owner_id"].append(owner_id)
                res["fid"].append(fid)
                res["importance"].append(float(weight))
        for fid, weight in enumerate(self.best_model.state_dict()["linear.weight"][0]):
            res["owner_id"].append(FedNode.node_id)
            res["fid"].append(fid)
            res["importance"].append(float(weight))
        res = pd.DataFrame(res).sort_values(by="importance", key=lambda col: np.abs(col), ascending=False)
        res.to_csv(Path(self.save_dir, self.output["feature_importance"]["name"]), header=True, index=False,
                   float_format="%.6g")
| 24,198 | 58.166259 | 120 | py |
XFL | XFL-master/python/algorithm/framework/vertical/poisson_regression/assist_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import tenseal as ts
from common.communication.gRPC.python.channel import BroadcastChannel, DualChannel
from common.crypto.paillier.paillier import Paillier
from common.utils.logger import logger
from service.fed_config import FedConfig
class VerticalPoissonRegressionAssistTrainer(object):
    """Assist trainer (key holder / decryptor) for vertical Poisson regression.

    Generates the homomorphic key pair, broadcasts the public key, and each
    batch decrypts the total loss and the noise-masked gradients the other
    parties send over the per-party "gradients_loss" channels.
    """

    def __init__(self, *args, **kwargs):
        """Set up channels, receive training meta info, and create/broadcast keys."""
        self.broadcast_channel = BroadcastChannel(name="Public keys", root_id=FedConfig.get_assist_trainer())
        self.dual_channels = {"gradients_loss": {}}
        self.party_id_list = FedConfig.get_label_trainer() + FedConfig.get_trainer()
        for party_id in self.party_id_list:
            self.dual_channels["gradients_loss"][party_id] = DualChannel(name="gradients_loss_" + party_id,
                                                                         ids=[FedConfig.node_id, party_id])
        # Training meta info is pushed by the label trainer in its __init__,
        # in this exact order.
        self.batch_num = self.dual_channels["gradients_loss"][FedConfig.get_label_trainer()[0]].recv()
        self.global_epoch = self.dual_channels["gradients_loss"][FedConfig.get_label_trainer()[0]].recv()
        self.batch_size = self.dual_channels["gradients_loss"][FedConfig.get_label_trainer()[0]].recv()
        self.encryption_config = self.dual_channels["gradients_loss"][FedConfig.get_label_trainer()[0]].recv()
        self.encryption_method = self.dual_channels["gradients_loss"][FedConfig.get_label_trainer()[0]].recv()
        self.private_context = None
        self.public_context = None
        # send encryption key to all parties
        # NOTE(review): the public key is broadcast here AND again at the start
        # of fit() — confirm the receivers expect two sends on this channel.
        if self.encryption_method == "ckks":
            self.private_context = ts.context(
                ts.SCHEME_TYPE.CKKS,
                poly_modulus_degree=self.encryption_config[self.encryption_method]["poly_modulus_degree"],
                coeff_mod_bit_sizes=self.encryption_config[self.encryption_method]["coeff_mod_bit_sizes"]
            )
            self.private_context.generate_galois_keys()
            self.private_context.generate_relin_keys()
            self.private_context.global_scale = 1 << self.encryption_config[self.encryption_method][
                "global_scale_bit_size"]
            # Serialize a public-only copy (no secret key leaves this party).
            serialized_public_context = self.private_context.serialize(
                save_public_key=True,
                save_secret_key=False,
                save_galois_keys=True,
                save_relin_keys=True
            )
            logger.info("Broadcast ckks public keys.")
            self.public_context_ser = serialized_public_context
            self.broadcast_channel.broadcast(self.public_context_ser, use_pickle=False)
            logger.info("Broadcast completed.")
        elif self.encryption_method == "paillier":
            self.num_cores = -1 if self.encryption_config[self.encryption_method]["parallelize_on"] else 1
            self.private_context = Paillier.context(self.encryption_config[self.encryption_method]["key_bit_size"],
                                                    djn_on=self.encryption_config[self.encryption_method]["djn_on"])
            logger.info("Broadcast paillier public keys.")
            self.public_context_ser = self.private_context.to_public().serialize()
            self.broadcast_channel.broadcast(self.public_context_ser, use_pickle=False)
            logger.info("Broadcast completed.")
        elif self.encryption_method == "plain":
            pass
        else:
            raise ValueError(f"Encryption method {self.encryption_method} not supported! Valid methods are 'paillier', "
                             f"'ckks', 'plain'.")

    def fit(self):
        """ train model
        Model parameters need to be updated before fitting.

        Per batch: decrypt the label trainer's total loss, then decrypt the
        noise-masked gradients for every trainer and for the label trainer
        (weight and bias), sending the plaintext values back.
        """
        # send encryption key to all parties
        if self.encryption_method in ["ckks", "paillier"]:
            # NOTE(review): log text says "ckks" even when paillier is in use.
            logger.info("Broadcast ckks public keys.")
            self.broadcast_channel.broadcast(self.public_context_ser, use_pickle=False)
            logger.info("Broadcast completed.")
        # train
        for epoch in range(1, self.global_epoch + 1):
            for batch_idx in range(self.batch_num):
                # receive and decrypt total encrypted loss and send to label_trainer
                logger.info("Receive and decrypted total loss and send back to label_trainer.")
                if self.encryption_method == "ckks":
                    enc_loss_batch = self.dual_channels["gradients_loss"][FedConfig.get_label_trainer()[0]].recv(
                        use_pickle=False)
                    decrypted_loss_batch = ts.ckks_vector_from(self.private_context, enc_loss_batch).decrypt()
                elif self.encryption_method == "paillier":
                    enc_loss_batch = self.dual_channels["gradients_loss"][FedConfig.get_label_trainer()[0]].recv(
                        use_pickle=False)
                    decrypted_loss_batch = Paillier.decrypt(self.private_context, Paillier.ciphertext_from(
                        None, enc_loss_batch), dtype='float', num_cores=self.num_cores)
                elif self.encryption_method == "plain":
                    enc_loss_batch = self.dual_channels["gradients_loss"][FedConfig.get_label_trainer()[0]].recv()
                    decrypted_loss_batch = enc_loss_batch
                # reduce the per-sample loss vector to the batch total
                decrypted_loss_batch = np.sum(decrypted_loss_batch)
                self.dual_channels["gradients_loss"][FedConfig.get_label_trainer()[0]].send(decrypted_loss_batch)
                logger.info(
                    "Loss of {} batch {} epoch is {}".format(batch_idx, epoch, decrypted_loss_batch / self.batch_size))
                # receive encrypted noised gradients from other parties and decrypt and send back to other parties
                if self.encryption_method == "ckks" or self.encryption_method == "paillier":
                    # trainer
                    for party_id in FedConfig.get_trainer():
                        en_noised_gradient_trainer_w = self.dual_channels["gradients_loss"][party_id].recv(
                            use_pickle=False)
                        if self.encryption_method == "ckks":
                            noised_gradient_trainer_w = ts.ckks_vector_from(self.private_context,
                                                                            en_noised_gradient_trainer_w).decrypt()
                        elif self.encryption_method == "paillier":
                            noised_gradient_trainer_w = Paillier.decrypt(self.private_context, Paillier.ciphertext_from(
                                None, en_noised_gradient_trainer_w), dtype='float', num_cores=self.num_cores)
                        self.dual_channels["gradients_loss"][party_id].send(noised_gradient_trainer_w)
                    # label_trainer (sends weight gradient first, then bias gradient)
                    en_noised_gradient_label_trainer_w = self.dual_channels["gradients_loss"][
                        FedConfig.get_label_trainer()[0]].recv(use_pickle=False)
                    en_noised_gradient_label_trainer_b = self.dual_channels["gradients_loss"][
                        FedConfig.get_label_trainer()[0]].recv(use_pickle=False)
                    if self.encryption_method == "ckks":
                        noised_gradient_label_trainer_w = ts.ckks_vector_from(
                            self.private_context, en_noised_gradient_label_trainer_w).decrypt()
                        noised_gradient_label_trainer_b = ts.ckks_vector_from(
                            self.private_context, en_noised_gradient_label_trainer_b).decrypt()
                    elif self.encryption_method == "paillier":
                        noised_gradient_label_trainer_w = Paillier.decrypt(
                            self.private_context, Paillier.ciphertext_from(None, en_noised_gradient_label_trainer_w),
                            dtype='float', num_cores=self.num_cores)
                        noised_gradient_label_trainer_b = Paillier.decrypt(
                            self.private_context, Paillier.ciphertext_from(None, en_noised_gradient_label_trainer_b),
                            dtype='float', num_cores=self.num_cores)
                    # calculate sum of gradient b
                    noised_gradient_label_trainer_b = np.sum(noised_gradient_label_trainer_b)
                    grad_send = {"noised_gradient_label_trainer_w": noised_gradient_label_trainer_w,
                                 "noised_gradient_label_trainer_b": noised_gradient_label_trainer_b}
                    self.dual_channels["gradients_loss"][FedConfig.get_label_trainer()[0]].send(grad_send)
| 9,167 | 61.794521 | 120 | py |
XFL | XFL-master/python/algorithm/framework/vertical/poisson_regression/__init__.py | 0 | 0 | 0 | py | |
XFL | XFL-master/python/algorithm/framework/vertical/poisson_regression/trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import secrets
from pathlib import Path
import numpy as np
import tenseal as ts
import torch
from common.utils.utils import update_dict
from common.communication.gRPC.python.channel import BroadcastChannel, DualChannel
from common.crypto.paillier.paillier import Paillier
from common.utils.logger import logger
from service.fed_config import FedConfig
from service.fed_node import FedNode
from common.utils.model_preserver import ModelPreserver
from common.utils.utils import save_model_config
from .base import VerticalPoissonRegressionBase
class VerticalPoissonRegressionTrainer(VerticalPoissonRegressionBase):
    def __init__(self, train_conf: dict, *args, **kwargs):
        """Trainer (non-label) party of vertical poisson regression.

        Args:
            train_conf (dict): local configuration; its train_info section is
                overwritten by the config broadcast from the label trainer.
        """
        self.sync_channel = BroadcastChannel(name="sync")
        conf = self._sync_config()
        update_dict(train_conf, conf)
        super().__init__(train_conf, label=False, *args, **kwargs)
        self._init_model()
        # model metadata exported alongside the weights for later inference
        self.export_conf = [{
            "class_name": "VerticalPoissonRegression",
            "identity": self.identity,
            "filename": self.save_model_name,
            "input_dim": self.data_dim,
            "bias": False
        }]
        # all parties must share one seed so shuffled batches stay aligned
        if self.random_seed is None:
            self.random_seed = self.sync_channel.recv()
        self.set_seed(self.random_seed)
        self.best_model = None
        self.node_id = FedConfig.node_id
        self.broadcast_channel = BroadcastChannel(name="Public keys", root_id=FedConfig.get_assist_trainer())
        self.trainers = FedConfig.get_trainer()
        # pairwise channels between trainers, used to chain the exp-product
        self.dual_trainers = {}
        if len(FedConfig.get_trainer()) > 1:
            for trainer in self.trainers:
                if trainer != self.node_id:
                    self.dual_trainers[trainer] = DualChannel(name="Trainer exchange",
                                                              ids=[trainer, self.node_id])
        self.dual_channels = {
            "intermediate_label_trainer": DualChannel(name="intermediate_label_trainer_" + self.node_id,
                                                      ids=FedConfig.get_label_trainer() + [self.node_id]),
            "gradients_loss": DualChannel(name="gradients_loss_" + self.node_id,
                                          ids=[FedConfig.get_assist_trainer()] + [self.node_id])
        }

    def _sync_config(self):
        """Receive the training configuration broadcast by the label trainer."""
        config = self.sync_channel.recv()
        return config

    def predict(self, input_data):
        """Send this party's partial linear predictions to the label trainer,
        batch by batch."""
        for batch_idx, x_batch in enumerate(input_data):
            # calculate prediction of batch
            pred_trainer = self.model(x_batch[0])
            # send to label_trainer
            self.dual_channels["intermediate_label_trainer"].send(pred_trainer.numpy().astype(np.float32).flatten(),
                                                                  use_pickle=True)

    def fit(self):
        """ train model
        Model parameters need to be updated before fitting.
        """
        self.check_data()
        num_cores = -1
        encryption_config = self.encryption_config
        encryption_method = list(self.encryption_config.keys())[0].lower()
        logger.info("Vertical poisson regression training start")
        # receive encryption key from assist trainer
        public_context = None
        if encryption_method == "ckks":
            logger.info("Receive ckks public key.")
            public_context = self.broadcast_channel.recv(use_pickle=False)
            public_context = ts.context_from(public_context)
            logger.info("Public key received.")
        elif encryption_method == "paillier":
            logger.info("Receive paillier public key.")
            public_context = self.broadcast_channel.recv(use_pickle=False)
            public_context = Paillier.context_from(public_context)
            logger.info("Public key received.")
        elif encryption_method == "plain":
            pass
        else:
            raise ValueError(
                f"Encryption method {encryption_method} not supported! Valid methods are 'paillier', 'ckks', 'plain'.")
        rng = secrets.SystemRandom()
        # train
        for epoch in range(1, self.global_epoch + 1):
            for batch_idx, x_batch in enumerate(self.train_dataloader):
                regular_loss_tmp = 0
                regular_gradient_tmp = 0
                enc_regular_gradient_tmp = 0
                # calculate regular results
                if self.optimizer_config['p'] == 1:
                    regular_loss_tmp = torch.abs(self.model.linear.weight).sum() * self.optimizer_config['alpha']
                    # L1 subgradient: use sign(w) so zero weights contribute 0
                    # instead of the NaN produced by abs(w) / w
                    regular_gradient_tmp = self.optimizer_config['alpha'] * torch.sign(self.model.linear.weight)
                elif self.optimizer_config['p'] == 2:
                    regular_loss_tmp = (self.model.linear.weight ** 2).sum() * self.optimizer_config['alpha'] / 2
                    regular_gradient_tmp = self.optimizer_config['alpha'] * self.model.linear.weight
                elif self.optimizer_config['p'] == 0:
                    # no regularization: keep zero tensors so the .numpy()
                    # conversions below do not fail on plain ints
                    regular_loss_tmp = torch.zeros(1)
                    regular_gradient_tmp = torch.zeros_like(self.model.linear.weight)
                regular_loss_tmp = regular_loss_tmp.numpy().astype(np.float32).flatten()
                # compute multiplication of exp of all trainers
                pred_tmp = self.model(x_batch[0])
                pred_trainer = torch.exp(pred_tmp).numpy().astype(np.float32).flatten()
                # if node_id is the first trainer of trainers, encrypt the result
                if self.node_id == self.trainers[0]:
                    if encryption_method == "ckks":
                        enc_pred_trainer = ts.ckks_vector(public_context, pred_trainer)
                    elif encryption_method == "paillier":
                        enc_pred_trainer = Paillier.encrypt(public_context, pred_trainer,
                                                            precision=encryption_config[encryption_method]["precision"],
                                                            obfuscation=True,
                                                            num_cores=num_cores)
                    elif encryption_method == "plain":
                        enc_pred_trainer = pred_trainer
                else:
                    pass
                # encrypt regular loss of trainers
                if encryption_method == "ckks":
                    enc_regular_loss = ts.ckks_vector(public_context, regular_loss_tmp)
                elif encryption_method == "paillier":
                    enc_regular_loss = Paillier.encrypt(public_context, regular_loss_tmp,
                                                        precision=encryption_config[encryption_method]["precision"],
                                                        obfuscation=True,
                                                        num_cores=num_cores)
                elif encryption_method == "plain":
                    enc_regular_loss = regular_loss_tmp
                # communicate and calculate multiplication of trainers: the
                # running product is passed down the trainer chain and the last
                # trainer forwards it to the label trainer
                logger.info("Calculate predicted exp result of all trainers.")
                if self.node_id == self.trainers[0]:
                    if len(self.trainers) > 1:
                        if encryption_method == "ckks":
                            self.dual_trainers[self.trainers[1]].send(enc_pred_trainer.serialize(), use_pickle=False)
                        elif encryption_method == "paillier":
                            self.dual_trainers[self.trainers[1]].send(Paillier.serialize(enc_pred_trainer),
                                                                      use_pickle=False)
                        elif encryption_method == "plain":
                            self.dual_trainers[self.trainers[1]].send(enc_pred_trainer)
                    elif len(self.trainers) == 1:
                        if encryption_method == "ckks":
                            self.dual_channels["intermediate_label_trainer"].send(enc_pred_trainer.serialize(),
                                                                                  use_pickle=False)
                        elif encryption_method == "paillier":
                            self.dual_channels["intermediate_label_trainer"].send(Paillier.serialize(enc_pred_trainer),
                                                                                  use_pickle=False)
                        elif encryption_method == "plain":
                            self.dual_channels["intermediate_label_trainer"].send(enc_pred_trainer)
                elif len(self.trainers) > 1:
                    train_ind = self.trainers.index(self.node_id)
                    pred_recv = self.dual_trainers[self.trainers[train_ind - 1]]
                    if train_ind != len(self.trainers) - 1:
                        # middle of the chain: multiply in and pass on
                        pred_send = self.dual_trainers[self.trainers[train_ind + 1]]
                        if encryption_method == "ckks":
                            pre_pred = ts.ckks_vector_from(public_context, pred_recv.recv(use_pickle=False))
                            multi_pred = pre_pred * pred_trainer
                            pred_send.send(multi_pred.serialize(), use_pickle=False)
                        elif encryption_method == "paillier":
                            pre_pred = Paillier.ciphertext_from(public_context, pred_recv.recv(use_pickle=False))
                            multi_pred = pre_pred * pred_trainer
                            pred_send.send(Paillier.serialize(multi_pred), use_pickle=False)
                        elif encryption_method == "plain":
                            pre_pred = pred_recv.recv()
                            multi_pred = pre_pred * pred_trainer
                            pred_send.send(multi_pred)
                    elif train_ind == len(self.trainers) - 1:
                        # end of the chain: multiply in and forward to label trainer
                        if encryption_method == "ckks":
                            pre_pred = ts.ckks_vector_from(public_context, pred_recv.recv(use_pickle=False))
                            multi_pred = pre_pred * pred_trainer
                            self.dual_channels["intermediate_label_trainer"].send(multi_pred.serialize(),
                                                                                  use_pickle=False)
                        elif encryption_method == "paillier":
                            pre_pred = Paillier.ciphertext_from(public_context, pred_recv.recv(use_pickle=False))
                            multi_pred = pre_pred * pred_trainer
                            self.dual_channels["intermediate_label_trainer"].send(Paillier.serialize(multi_pred),
                                                                                  use_pickle=False)
                        elif encryption_method == "plain":
                            pre_pred = pred_recv.recv()
                            multi_pred = pre_pred * pred_trainer
                            self.dual_channels["intermediate_label_trainer"].send(multi_pred)
                # send intermediate results to label trainer.
                logger.info("Send intermediate result to label trainer.")
                if encryption_method == "ckks":
                    enc_pred_tmp = ts.ckks_vector(public_context, pred_tmp.numpy().astype(np.float32).flatten())
                    self.dual_channels["intermediate_label_trainer"].send(enc_pred_tmp.serialize(),
                                                                          use_pickle=False)
                    self.dual_channels["intermediate_label_trainer"].send(enc_regular_loss.serialize(),
                                                                          use_pickle=False)
                elif encryption_method == "paillier":
                    enc_pred_tmp = Paillier.encrypt(public_context,
                                                    pred_tmp.numpy().astype(np.float32).flatten(),
                                                    precision=encryption_config[encryption_method]["precision"],
                                                    obfuscation=True,
                                                    num_cores=num_cores)
                    self.dual_channels["intermediate_label_trainer"].send(Paillier.serialize(enc_pred_tmp),
                                                                          use_pickle=False)
                    self.dual_channels["intermediate_label_trainer"].send(Paillier.serialize(enc_regular_loss),
                                                                          use_pickle=False)
                elif encryption_method == "plain":
                    enc_pred_tmp = pred_tmp.numpy().astype(np.float32).flatten()
                    self.dual_channels["intermediate_label_trainer"].send(enc_pred_tmp, use_pickle=True)
                    self.dual_channels["intermediate_label_trainer"].send(enc_regular_loss, use_pickle=True)
                # receive intermediate result d from label_trainer
                logger.info("Receive intermediate result d from label_trainer.")
                if encryption_method == "ckks":
                    enc_d = self.dual_channels["intermediate_label_trainer"].recv(use_pickle=False)
                    enc_d = ts.ckks_vector_from(public_context, enc_d)
                elif encryption_method == "paillier":
                    enc_d = self.dual_channels["intermediate_label_trainer"].recv(use_pickle=False)
                    enc_d = Paillier.ciphertext_from(public_context, enc_d)
                elif encryption_method == "plain":
                    enc_d = self.dual_channels["intermediate_label_trainer"].recv()
                # calculate gradient for trainer and send to assist_trainer
                logger.info("Calculate gradients for trainer.")
                if encryption_method == "ckks":
                    enc_regular_gradient_tmp = ts.ckks_vector(public_context,
                                                              regular_gradient_tmp.numpy().astype(np.float32).flatten())
                elif encryption_method == "paillier":
                    enc_regular_gradient_tmp = Paillier.encrypt(
                        public_context, regular_gradient_tmp.numpy().astype(np.float32).flatten(),
                        precision=encryption_config[encryption_method]["precision"],
                        obfuscation=True, num_cores=num_cores)
                elif encryption_method == "plain":
                    enc_regular_gradient_tmp = regular_gradient_tmp.numpy().astype(np.float32).flatten()
                if encryption_method == "ckks":
                    gradient_trainer_w = enc_d.matmul(x_batch[0].numpy()) + enc_regular_gradient_tmp
                else:
                    gradient_trainer_w = np.matmul(enc_d.reshape(1, len(enc_d)), x_batch[0].numpy()
                                                   ) + enc_regular_gradient_tmp
                # add noise to encrypted gradients and send to assist_trainer;
                # the noise hides the true gradient from the decrypting party
                if encryption_method == "ckks":
                    logger.info("Calculate noised gradient for trainer.")
                    noise = np.array([rng.randint(1 << 24, 1 << 26) - (1 << 25) for _ in range(x_batch[0].shape[1])],
                                     dtype=np.float32)
                    noise /= 100000
                    noised_gradient_trainer_w = gradient_trainer_w + noise
                    logger.info("Send noised gradient to assist_trainer.")
                    self.dual_channels["gradients_loss"].send(noised_gradient_trainer_w.serialize(), use_pickle=False)
                    # receive decrypted gradient from assist_trainer
                    logger.info("Receive decrypted gradient from assist_trainer.")
                    noised_gradient_trainer_w = self.dual_channels["gradients_loss"].recv()
                    gradient_trainer_w = noised_gradient_trainer_w - noise
                elif encryption_method == "paillier":
                    logger.info("Calculate noised gradient for trainer.")
                    noise = np.array([rng.randint(1 << 24, 1 << 26) - (1 << 25) for _ in range(x_batch[0].shape[1])],
                                     dtype=np.float32)
                    noise /= 100000
                    noised_gradient_trainer_w = gradient_trainer_w + noise
                    logger.info("Send noised gradient to assist_trainer.")
                    self.dual_channels["gradients_loss"].send(Paillier.serialize(noised_gradient_trainer_w),
                                                              use_pickle=False)
                    # receive decrypted gradient from assist_trainer
                    logger.info("Receive decrypted gradient from assist_trainer.")
                    noised_gradient_trainer_w = self.dual_channels["gradients_loss"].recv()
                    gradient_trainer_w = noised_gradient_trainer_w - noise
                # update w and b of trainer (manual SGD step; autograd is off)
                gradient_trainer_w = gradient_trainer_w / x_batch[0].shape[0]
                logger.info("Update weights of trainer.")
                self.model.linear.weight -= (torch.FloatTensor(gradient_trainer_w) * self.optimizer_config["lr"])
            # predict train and val for metrics
            logger.info("Predict train weights of trainer.")
            self.predict(self.train_dataloader)
            logger.info("Predict val weights of trainer.")
            self.predict(self.val_dataloader)
            # receive flags
            early_stop_flag, best_model_flag, patient = self.dual_channels["intermediate_label_trainer"].recv(
                use_pickle=True)
            # update best model
            if best_model_flag:
                self.best_model = copy.deepcopy(self.model)
            # if need to save results by epoch
            if self.save_frequency > 0 and epoch % self.save_frequency == 0:
                ModelPreserver.save(save_dir=self.save_dir,
                                    model_name=self.save_model_name,
                                    state_dict=self.model.state_dict(),
                                    epoch=epoch)
            # if early stopping, break
            if early_stop_flag:
                break
        # save model for infer
        save_model_config(stage_model_config=self.export_conf, save_path=Path(self.save_dir))
        # if not early stopping, save model
        ModelPreserver.save(save_dir=self.save_dir, model_name=self.save_model_name,
                            state_dict=self.best_model.state_dict(), final=True)
        # send w to label trainer
        self._save_feature_importance(self.dual_channels["intermediate_label_trainer"])

    def _save_feature_importance(self, channel):
        """Send this party's final weight vector to the label trainer."""
        channel.send((FedNode.node_id, self.best_model.state_dict()["linear.weight"][0]))

    def check_data(self):
        """Broadcast the local feature dimension so parties can validate shapes."""
        dim_channel = BroadcastChannel(name="check_data_com", ids=[self.node_id] + FedConfig.get_trainer())
        dim_channel.send(self.data_dim)
| 19,818 | 57.463127 | 120 | py |
XFL | XFL-master/python/algorithm/framework/vertical/sampler/base.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from pathlib import Path
from algorithm.core.data_io import CsvReader
from common.communication.gRPC.python.channel import BroadcastChannel
from common.utils.config_parser import TrainConfigParser
from common.utils.logger import logger
class VerticalSamplerBase(TrainConfigParser):
    """Common base for the vertical sampler parties: loads the configured
    dataset, opens the broadcast channel and prepares output locations."""

    def __init__(self, train_conf: dict):
        super().__init__(train_conf)
        self._init_data()
        self.broadcast_channel = BroadcastChannel(name="vertical_sampler_channel")
        # id-output config ({} means "do not save sampled ids")
        self.save_id = self.output.get("sample_id", {})
        self.save_data_path = self.output["path"] / Path(self.output["dataset"]["name"])
        out_dir = os.path.dirname(self.save_data_path)
        if not os.path.exists(out_dir):
            os.makedirs(out_dir)

    def __load_data(self, config) -> CsvReader:
        """Build a CsvReader from the first dataset entry of *config*."""
        if len(config) > 1:
            logger.warning("More than one dataset is not supported.")
        first = config[0]
        if first["type"] != "csv":
            raise NotImplementedError("Dataset type {} is not supported.".format(first["type"]))
        return CsvReader(path=os.path.join(first["path"], first["name"]),
                         has_id=first["has_id"],
                         has_label=first["has_label"])

    def _init_data(self) -> None:
        """Read the configured dataset into ``self.data``, indexed by sample id."""
        if len(self.input["dataset"]) == 0:
            raise NotImplementedError("Dataset was not configured.")
        reader: CsvReader = self.__load_data(self.input["dataset"])
        self.data = reader.table.set_index(reader.ids)
        self.label_name = reader.label_name()
| 2,228 | 40.277778 | 111 | py |
XFL | XFL-master/python/algorithm/framework/vertical/sampler/label_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from pathlib import Path
from typing import Any
import pandas as pd
from sklearn.utils import resample
from algorithm.framework.vertical.sampler.base import VerticalSamplerBase
from common.utils.logger import logger
from service.fed_control import ProgressCalculator
class VerticalSamplerLabelTrainer(VerticalSamplerBase):
    """
    local feature sampler
    method: str, "random" or "stratify", default: "random";
    strategy: str, "downsample" or "upsample", default: "downsample";
    fraction: int, float or list (sampling ratios of each category),
                e.g. [[0,0.1], [1,0.2]], default: {"percentage": 0.1};
    random_state: int, RandomState instance or None, default=None
    """

    def __init__(self, train_conf):
        super().__init__(train_conf)
        self.label_count = {}  # label value -> sample count (stratify only)
        self.label_idset = {}  # label value -> list of row ids (stratify only)
        self._parse_config()
        self._init_data()
        self.sample_ids = []

    def _parse_config(self) -> None:
        """Read sampling hyper-parameters from train_params."""
        self.random_state = self.train_params.get("random_state", None)
        self.method = self.train_params.get("method", "random")
        self.strategy = self.train_params.get("strategy", "downsample")
        self.fraction = self.train_params.get("fraction", {"number": 0.1})
        # whether for new customers filter
        self.infer_params = self.train_params.get("marketing_specified", {})
        if len(self.infer_params) > 0:
            self.threshold_method = self.infer_params["threshold_method"]
            self.threshold = self.infer_params["threshold"]

    def _replace_flag(self) -> bool:
        """Map the configured strategy to sklearn resample's ``replace`` flag.

        Raises:
            NotImplementedError: if strategy is neither down- nor upsample.
        """
        if self.strategy == "downsample":
            return False
        if self.strategy == "upsample":
            return True
        raise NotImplementedError("Strategy type {} is not supported.".format(self.strategy))

    def fraction_transform(self) -> Any:
        """Translate the configured fraction into absolute sample counts.

        Returns:
            An int (for "percentage"/"number" keys) or a list of
            (label, count) tuples (for "labeled_percentage").
        """
        def fraction_num(fraction, num):
            frac_num = int(fraction * num)
            if self.strategy == "downsample":
                if fraction < 0 or fraction > 1:
                    raise ValueError("Fraction should be a numeric number between 0 and 1")
                # never drop every row
                return max(1, frac_num)
            elif self.strategy == "upsample":
                if fraction < 0:
                    raise ValueError("Fraction should be a numeric number larger than 0")
                return frac_num

        fraction_key = list(self.fraction.keys())[0]
        if fraction_key == "percentage":
            return fraction_num(self.fraction[fraction_key], len(self.data))
        elif fraction_key == "labeled_percentage":
            return [(label, fraction_num(frac, self.label_count[label]))
                    for label, frac in self.fraction[fraction_key]]
        elif fraction_key == "number":
            return self.fraction[fraction_key]
        else:
            raise NotImplementedError("Fraction key {} is not supported.".format(fraction_key))

    def random_method(self) -> Any:
        """Sample uniformly from all rows.

        Returns:
            (sample_ids, sampled DataFrame)
        """
        replace = self._replace_flag()
        sample_num = self.fraction_transform()
        sample_ids = resample(self.data.index,
                              replace=replace,
                              n_samples=sample_num,
                              random_state=self.random_state)
        new_data = self.data.loc[sample_ids]
        logger.info("Upsample completed." if replace else "Downsample completed.")
        return sample_ids, new_data

    def stratify_method(self) -> Any:
        """Sample separately inside each label group.

        Returns:
            (sample_ids, sampled DataFrame)
        """
        replace = self._replace_flag()
        sample_num = self.fraction_transform()
        sample_ids = []
        new_data = pd.DataFrame()
        for label, label_num in sample_num:
            sample_ids_ = resample(self.label_idset[label],
                                   replace=replace,
                                   n_samples=label_num,
                                   random_state=self.random_state)
            new_data_ = self.data.loc[sample_ids_]
            sample_ids += sample_ids_
            new_data = pd.concat([new_data, new_data_])
        logger.info("Upsample completed." if replace else "Downsample completed.")
        return sample_ids, new_data

    def fit(self) -> None:
        """Run sampling, persist the results, and broadcast the chosen ids."""
        new_data = None
        # for most cases
        if len(self.infer_params) == 0:
            if self.method == "random":
                self.sample_ids, new_data = self.random_method()
                logger.info("Random sampler completed.")
            elif self.method == "stratify":
                self.label_count = self.data.groupby(self.label_name)[self.label_name].count().to_dict()
                self.label_idset = self.data.groupby(self.label_name).apply(lambda group: list(group.index)).to_dict()
                self.sample_ids, new_data = self.stratify_method()
                logger.info("Stratify sampler completed.")
            else:
                raise NotImplementedError("Method type {} is not supported.".format(self.method))
        # for new customers filter: keep rows whose first column (score)
        # passes the configured threshold
        elif len(self.infer_params) > 0:
            key = self.data.columns[0]
            if self.threshold_method == "percentage":
                threshold_num = len(self.data) * self.threshold
                self.sample_ids = self.data.sort_values(by=key, ascending=False).iloc[:int(threshold_num)].index
            elif self.threshold_method == "number":
                if self.threshold > len(self.data):
                    raise OverflowError("Threshold number {} is larger than input data size.".format(self.threshold))
                else:
                    self.sample_ids = self.data.sort_values(by=key, ascending=False).iloc[:int(self.threshold)].index
            elif self.threshold_method == "score":
                self.sample_ids = self.data[self.data[key] > self.threshold].index
            else:
                raise NotImplementedError("Method type {} is not supported.".format(self.threshold_method))
        # save
        if new_data is not None:
            new_data.to_csv(self.save_data_path, index=self.input["dataset"][0]["has_id"])
            logger.info("Data saved to {}.".format(self.save_data_path))
        if len(self.save_id) > 0:
            save_id_path = self.output["path"] / Path(self.output["sample_id"]["name"])
            if not os.path.exists(os.path.dirname(save_id_path)):
                os.makedirs(os.path.dirname(save_id_path))
            with open(save_id_path, "w") as wf:
                json.dump(list(self.sample_ids), wf)
            logger.info("Sample ids saved to {}.".format(save_id_path))
        # send ids to trainer
        self.broadcast_channel.broadcast(self.sample_ids)
        # update the progress of 100 to show the training is finished
        ProgressCalculator.finish_progress()
| 8,302 | 45.385475 | 118 | py |
XFL | XFL-master/python/algorithm/framework/vertical/sampler/__init__.py | 0 | 0 | 0 | py | |
XFL | XFL-master/python/algorithm/framework/vertical/sampler/trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from pathlib import Path
from algorithm.framework.vertical.sampler.base import VerticalSamplerBase
from common.utils.logger import logger
class VerticalSamplerTrainer(VerticalSamplerBase):
    """Non-label party of the vertical sampler: applies the sample ids chosen
    by the label trainer to its own local dataset."""

    def __init__(self, train_conf):
        super().__init__(train_conf)

    def fit(self) -> None:
        """Receive the sampled ids, subset local data and persist the results."""
        selected = self.broadcast_channel.recv()
        subset = self.data.loc[selected]
        if len(self.save_id) > 0:
            id_path = self.output["path"] / Path(self.output["sample_id"]["name"])
            id_dir = os.path.dirname(id_path)
            if not os.path.exists(id_dir):
                os.makedirs(id_dir)
            with open(id_path, "w") as wf:
                json.dump(list(selected), wf)
            logger.info("Sample ids saved to {}.".format(id_path))
        subset.to_csv(self.save_data_path, index=self.input["dataset"][0]["has_id"])
        logger.info("Data saved to {}.".format(self.save_data_path))
| 1,616 | 39.425 | 87 | py |
XFL | XFL-master/python/algorithm/framework/vertical/linear_regression/base.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset
from algorithm.core.data_io import CsvReader
from algorithm.framework.vertical.vertical_model_base import VerticalModelBase
from common.utils.logger import logger
from common.utils.model_preserver import ModelPreserver
from service.fed_config import FedConfig
class VerticalLinearRegression(nn.Module):
    """Single linear layer used as the per-party sub-model in vertical
    linear regression."""

    def __init__(self, input_dim: int, bias: bool = False):
        super(VerticalLinearRegression, self).__init__()
        layer = torch.nn.Linear(input_dim, 1, bias=bias)
        # weights are updated manually by the protocol, so autograd is disabled
        layer.requires_grad_(False)
        self.linear = layer

    def forward(self, x):
        return self.linear(x)
class VerticalLinearRegressionBase(VerticalModelBase):
    def __init__(self, train_conf: dict, label: bool = False, *args, **kwargs):
        """Shared base for the parties of vertical linear regression.

        Args:
            train_conf (dict): parsed training configuration.
            label (bool, optional): True when this party holds the labels. Defaults to False.
        """
        super().__init__(train_conf)
        self._parse_config()
        self.train_conf = train_conf
        self.label = label
        self.data_dim = None
        self.model = None
        self.train_dataloader, self.eval_dataloader = None, None
        # the assist trainer holds no data, so it never builds dataloaders
        if FedConfig.node_id != "assist_trainer":
            self._init_dataloader()

    def _parse_config(self) -> None:
        """Extract model, output and training hyper-parameters from the config."""
        super()._parse_config()
        self.model_name = self.model_info.get("name")
        self.save_model_name = self.output.get("model", {}).get("name")
        self.save_onnx_model_name = self.output.get("onnx_model", {}).get("name", "")
        if not os.path.exists(self.save_dir):
            os.makedirs(self.save_dir)
        self.global_epoch = self.train_params.get("global_epoch")
        self.batch_size = self.train_params.get("batch_size")
        self.encryption_config = self.train_params.get("encryption")
        self.optimizer_config = self.train_params.get("optimizer")
        self.pretrain_model_path = self.input.get("pretrained_model", {}).get("path")
        self.random_seed = self.train_params.get("random_seed", None)
        self.early_stopping_config = self.train_params.get("early_stopping")
        self.save_frequency = self.interaction_params.get("save_frequency")

    @staticmethod
    def set_seed(seed):
        """Seed torch's CPU and CUDA RNGs for reproducible batching."""
        torch.manual_seed(seed)
        torch.cuda.manual_seed(seed)

    def _init_model(self, bias: bool = False) -> None:
        """
        Init linear regression model, optionally restoring pretrained weights.

        Args:
            bias: whether the local linear layer has a bias term.

        Returns: None
        """
        logger.info("Init model start.")
        self.model = VerticalLinearRegression(input_dim=self.data_dim, bias=bias)
        # Load pretrained model if needed.
        if self.pretrain_model_path is not None and self.pretrain_model_path != "":
            checkpoint = ModelPreserver.load(os.path.join(self.pretrain_model_path, self.input.get(
                "pretrained_model").get("name", None)))
            self.model.load_state_dict(checkpoint["state_dict"])
        logger.info("Init model completed.")

    def __load_data(self, config) -> CsvReader:
        # Only the first dataset entry is used.
        config = config[0]
        if config["type"] == "csv":
            data_reader = CsvReader(path=os.path.join(config["path"], config["name"]), has_id=config["has_id"],
                                    has_label=config["has_label"])
        else:
            raise NotImplementedError("Dataset type {} is not supported.".format(config["type"]))
        return data_reader

    def _init_data(self) -> None:
        """Load train/val features, labels and positional row ids from csv."""
        if len(self.input_trainset) > 0:
            data: CsvReader = self.__load_data(self.input_trainset)
            self.train = data.features()
            self.train_label = data.label()
            # ids are re-indexed positionally (0..n-1)
            self.train_ids = list(range(len(data.ids)))
        else:
            raise NotImplementedError("Trainset was not configured.")
        if self.label:
            assert len(self.train) == len(self.train_label)
        if len(self.input_valset) > 0:
            data: CsvReader = self.__load_data(self.input_valset)
            self.val = data.features()
            self.val_label = data.label()
            self.val_ids = list(range(len(data.ids)))
            if self.label:
                assert len(self.val) == len(self.val_label)

    def _init_dataloader(self) -> None:
        """
        Build train/val DataLoaders; label holders get (x, y, id) batches,
        other trainers get (x, id) batches. Also records self.data_dim.

        Returns:

        """
        logger.info("Dataloader initiation start.")
        self._init_data()
        if self.label:
            self.train_dataloader = DataLoader(
                dataset=TensorDataset(torch.tensor(self.train, dtype=torch.float32),
                                      torch.unsqueeze(torch.tensor(self.train_label), dim=-1),
                                      torch.unsqueeze(torch.tensor(self.train_ids), dim=-1)),
                batch_size=self.batch_size, shuffle=True
            )
            self.val_dataloader = DataLoader(
                dataset=TensorDataset(torch.tensor(self.val, dtype=torch.float32),
                                      torch.unsqueeze(torch.tensor(self.val_label), dim=-1),
                                      torch.unsqueeze(torch.tensor(self.val_ids), dim=-1)),
                batch_size=self.batch_size, shuffle=False
            )
            self.data_dim = torch.tensor(self.train).shape[-1]
            logger.info("Train data shape: {}.".format(list(torch.tensor(self.train).shape)))
        else:
            self.train_dataloader = DataLoader(
                dataset=TensorDataset(torch.tensor(self.train, dtype=torch.float32),
                                      torch.unsqueeze(torch.tensor(self.train_ids), dim=-1)),
                batch_size=self.batch_size, shuffle=True
            )
            self.val_dataloader = DataLoader(
                dataset=TensorDataset(torch.tensor(self.val, dtype=torch.float32),
                                      torch.unsqueeze(torch.tensor(self.val_ids), dim=-1)),
                batch_size=self.batch_size, shuffle=False
            )
            self.data_dim = torch.tensor(self.train).shape[-1]
            logger.info("Train data shape: {}.".format(list(torch.tensor(self.train).shape)))
        logger.info("Dataloader initiation completed.")
| 6,807 | 41.55 | 111 | py |
XFL | XFL-master/python/algorithm/framework/vertical/linear_regression/label_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import secrets
from functools import reduce
from pathlib import Path
from typing import Optional
from common.checker.x_types import All
import numpy as np
import pandas as pd
import tenseal as ts
import torch
from common.checker.matcher import get_matched_config
from common.communication.gRPC.python.channel import BroadcastChannel, DualChannel
from common.crypto.paillier.paillier import Paillier
from common.utils.algo_utils import earlyStopping
from common.utils.logger import logger
from common.utils.utils import save_model_config
from common.utils.model_io import ModelIO
from service.fed_config import FedConfig
from service.fed_node import FedNode
from service.fed_control import ProgressCalculator
from .base import VerticalLinearRegressionBase
class VerticalLinearRegressionLabelTrainer(VerticalLinearRegressionBase):
    def __init__(self, train_conf: dict, *args, **kwargs):
        """
        Vertical Linear Regression label trainer: syncs config to the other
        parties, builds the local model and opens all channels.

        Args:
            train_conf: training parameters
            *args:
            **kwargs:
        """
        self.sync_channel = BroadcastChannel(name="sync")
        # push train_info to trainers before building the local model
        self._sync_config(train_conf)
        super().__init__(train_conf, label=True, *args, **kwargs)
        if self.random_seed:
            self.set_seed(self.random_seed)
        self.progress_calculator = ProgressCalculator(self.global_epoch, len(self.train_dataloader))
        # the label trainer's sub-model carries the bias term
        self._init_model(bias=True)
        # model metadata exported alongside the weights for later inference
        self.export_conf = [{
            "class_name": "VerticalLinearRegression",
            "identity": self.identity,
            "filename": self.save_onnx_model_name,
            "input_dim": self.data_dim,
            "bias": True,
            "version": "1.4.0"
        }]
        self.es = earlyStopping(key=self.early_stopping_config["key"],
                                patience=self.early_stopping_config["patience"],
                                delta=self.early_stopping_config["delta"])
        self.best_model = None
        self.broadcast_channel = BroadcastChannel(name="Public keys", root_id=FedConfig.get_assist_trainer())
        # one dual channel per trainer plus one to the assist trainer
        self.dual_channels = {"intermediate_label_trainer": {}, "gradients_loss": None}
        for party_id in FedConfig.get_trainer():
            self.dual_channels["intermediate_label_trainer"][party_id] = DualChannel(
                name="intermediate_label_trainer_" + party_id, ids=[FedConfig.node_id, party_id])
        self.dual_channels["gradients_loss"] = DualChannel(name="gradients_loss_" + FedConfig.node_id,
                                                           ids=[FedConfig.get_assist_trainer()] + [FedConfig.node_id])
        self.train_result = None
        self.val_result = None
        # tell the assist trainer the training schedule and encryption setup;
        # the recv order on the assist side must mirror this send order
        self.dual_channels["gradients_loss"].send(len(self.train_dataloader))
        self.dual_channels["gradients_loss"].send(self.global_epoch)
        self.dual_channels["gradients_loss"].send(self.batch_size)
        self.encryption_method = list(self.encryption_config.keys())[0].lower()
        self.dual_channels["gradients_loss"].send(self.encryption_config)
        self.dual_channels["gradients_loss"].send(self.encryption_method)
def _sync_config(self, config):
sync_rule = {
"train_info": All()
}
config_to_sync = get_matched_config(config, sync_rule)
self.sync_channel.broadcast(config_to_sync)
    def predict(self, input_data):
        """Produce joint predictions over *input_data*.

        For each batch, the local partial prediction is summed with the
        partial predictions received from every passive trainer; the passive
        trainers run their own ``predict`` loop in lockstep and send one
        message per batch, so batch counts must match on all parties.

        Args:
            input_data: dataloader yielding ``(x_batch, y_batch, _)`` tuples.

        Returns:
            tuple: ``(y_epoch, pred_prob_epoch)`` — flat Python lists of
            ground-truth labels and joint predictions over the whole input.
        """
        pred_prob_epoch, y_epoch = [], []
        for batch_idx, (x_batch, y_batch, _) in enumerate(input_data):
            pred_trainer_list = []
            pred_label_trainer = self.model(x_batch).numpy().astype(np.float32).flatten()
            # One plaintext partial prediction per passive trainer, in party order.
            for party_id in FedConfig.get_trainer():
                pred_trainer_list.append(self.dual_channels["intermediate_label_trainer"][party_id].recv(
                    use_pickle=True))
            # calculate prediction of batch
            pred_total = pred_label_trainer + reduce(lambda x, y: x + y, pred_trainer_list)
            # calculate prediction of epoch
            pred_prob_epoch += pred_total.tolist()
            y_epoch += y_batch.numpy().astype(np.float32).flatten().tolist()
        return y_epoch, pred_prob_epoch
    def fit(self):
        """Run the full vertical linear-regression training protocol (active party).

        Per batch: exchange encrypted partial predictions/losses with the
        passive trainers, have the assist trainer decrypt the masked total
        loss and gradients, then update the local weights and bias. After
        each epoch: compute train/val metrics, drive early stopping, and
        broadcast stop/best-model flags to the passive trainers. Message
        order on every channel is part of the protocol and must stay in sync
        with the trainer and assist-trainer ``fit`` loops.

        Raises:
            ValueError: if the configured encryption method is not one of
                'ckks', 'paillier', 'plain'.
        """
        self.check_data()
        public_context = None
        num_cores = -1
        # Cryptographically secure RNG for the additive masks applied to
        # encrypted gradients before they are sent for decryption.
        rng = secrets.SystemRandom()
        logger.info("Vertical linear regression training start")
        # receive encryption key from assist trainer
        if self.encryption_method == "ckks":
            logger.info("Receive ckks public key.")
            public_context = self.broadcast_channel.recv(use_pickle=False)
            public_context = ts.context_from(public_context)
            logger.info("Public key received.")
        elif self.encryption_method == "paillier":
            logger.info("Receive paillier public key.")
            public_context = self.broadcast_channel.recv(use_pickle=False)
            public_context = Paillier.context_from(public_context)
            logger.info("Public key received.")
        elif self.encryption_method == "plain":
            pass
        else:
            raise ValueError(f"Encryption method {self.encryption_method} not supported! Valid methods are "
                             f"'paillier', 'ckks', 'plain'.")
        # train
        for epoch in range(1, self.global_epoch + 1):
            loss_epoch = 0
            for batch_idx, (x_batch, y_batch, _) in enumerate(self.train_dataloader):
                pred_trainer = []
                loss_trainer = []
                loss_between_trainer = 0
                enc_pred_residual = None
                enc_loss_label_trainer = None
                regular_loss_tmp = 0
                regular_gradient_tmp = 0
                enc_regular_gradient_tmp = 0
                # calculate regular results
                # p selects the penalty: 1 -> L1, 2 -> L2, 0 -> none.
                # NOTE(review): the L1 gradient |w|/w equals sign(w) but divides
                # by zero for weights that are exactly 0 — confirm weights never
                # hit exact zero, or guard with torch.sign.
                if self.optimizer_config['p'] == 1:
                    regular_loss_tmp = torch.abs(self.model.linear.weight).sum() * self.optimizer_config['alpha']
                    regular_gradient_tmp = self.optimizer_config['alpha'] * (torch.abs(self.model.linear.weight)
                                                                             / self.model.linear.weight)
                elif self.optimizer_config['p'] == 2:
                    regular_loss_tmp = (self.model.linear.weight ** 2).sum() * self.optimizer_config['alpha'] / 2
                    regular_gradient_tmp = self.optimizer_config['alpha'] * self.model.linear.weight
                elif self.optimizer_config['p'] == 0:
                    pass
                # compute theta_scheduler * label_trainer and loss of label_trainer
                logger.info("Calculate intermediate result of label trainer.")
                pred_label_trainer = self.model(x_batch)
                pred_residual = pred_label_trainer - y_batch
                # receive intermediate results from trainers
                # Each passive trainer sends (partial prediction, partial loss)
                # in that order; deserialization depends on the scheme.
                for party_id in FedConfig.get_trainer():
                    if self.encryption_method == "ckks":
                        pred_trainer.append(ts.ckks_vector_from(public_context, self.dual_channels[
                            "intermediate_label_trainer"][party_id].recv(use_pickle=False)))
                        loss_trainer.append(ts.ckks_vector_from(public_context, self.dual_channels[
                            "intermediate_label_trainer"][party_id].recv(use_pickle=False)))
                    elif self.encryption_method == "paillier":
                        pred_trainer.append(Paillier.ciphertext_from(public_context, self.dual_channels[
                            "intermediate_label_trainer"][party_id].recv(use_pickle=False)))
                        loss_trainer.append(Paillier.ciphertext_from(public_context, self.dual_channels[
                            "intermediate_label_trainer"][party_id].recv(use_pickle=False)))
                    elif self.encryption_method == "plain":
                        pred_trainer.append(self.dual_channels["intermediate_label_trainer"][party_id].recv())
                        loss_trainer.append(self.dual_channels["intermediate_label_trainer"][party_id].recv())
                logger.info("Received predictions from trainers, length of collect list is {}."
                            .format(len(pred_trainer)))
                # calculate total loss
                logger.info("Calculate total loss.")
                square_tmp = (pred_residual ** 2).sum() / 2
                loss_label_trainer = square_tmp + regular_loss_tmp
                # Cross term between the local residual and each passive
                # trainer's (possibly encrypted) partial prediction.
                if self.encryption_method == "ckks":
                    loss_between_label_trainer = np.sum([pred_t.matmul(pred_residual.numpy()) for pred_t in pred_trainer
                                                         ])
                else:
                    loss_between_label_trainer = np.sum(pred_residual.numpy().flatten() * pred_trainer
                                                        )
                # calculate total loss_between_trainer when there are more than one trainer
                # Pairwise cross terms between passive trainers; with Paillier
                # they are computed trainer-side and received here instead.
                if len(pred_trainer) > 1:
                    if self.encryption_method == "plain":
                        loss_between_trainer = np.sum([np.sum(i * j) if ind_i != ind_j else 0
                                                       for ind_i, i in enumerate(pred_trainer)
                                                       for ind_j, j in enumerate(pred_trainer)]) / 2
                    elif self.encryption_method == "ckks":
                        loss_between_trainer = np.sum([i.dot(j) if ind_i != ind_j else 0
                                                       for ind_i, i in enumerate(pred_trainer)
                                                       for ind_j, j in enumerate(pred_trainer)]) * 0.5
                    elif self.encryption_method == "paillier":
                        loss_between_trainer = []
                        for party_id in FedConfig.get_trainer():
                            tmp = self.dual_channels["intermediate_label_trainer"][party_id].recv(use_pickle=False)
                            tmp = Paillier.ciphertext_from(public_context, tmp)
                            loss_between_trainer.append(tmp)
                        loss_between_trainer = np.sum(loss_between_trainer) / 2
                # Encrypt the local loss so that the batch total can be summed
                # homomorphically with the trainers' encrypted losses.
                if self.encryption_method == "ckks":
                    enc_loss_label_trainer = ts.ckks_vector(public_context,
                                                            loss_label_trainer.numpy().astype(np.float32).flatten())
                elif self.encryption_method == "paillier":
                    enc_loss_label_trainer = Paillier.encrypt(public_context,
                                                              float(loss_label_trainer),
                                                              precision=self.encryption_config[self.encryption_method][
                                                                  "precision"],
                                                              obfuscation=True,
                                                              num_cores=num_cores)
                elif self.encryption_method == "plain":
                    enc_loss_label_trainer = loss_label_trainer
                enc_loss_batch = loss_between_trainer + loss_between_label_trainer + enc_loss_label_trainer + np.sum(
                    loss_trainer)
                # send total loss to assist_trainer
                logger.info("Send total loss to assist_trainer.")
                if self.encryption_method == "ckks":
                    self.dual_channels["gradients_loss"].send(enc_loss_batch.serialize(), use_pickle=False)
                elif self.encryption_method == "paillier":
                    self.dual_channels["gradients_loss"].send(Paillier.serialize(enc_loss_batch), use_pickle=False)
                elif self.encryption_method == "plain":
                    self.dual_channels["gradients_loss"].send(enc_loss_batch)
                # receive decrypted loss from assist_trainer
                logger.info("Receive total loss from assist_trainer.")
                loss_batch = self.dual_channels["gradients_loss"].recv()
                loss_batch = loss_batch / x_batch.shape[0]
                logger.info("Loss of {} batch is {}".format(batch_idx, loss_batch))
                loss_epoch += loss_batch * x_batch.shape[0]
                # calculate intermediate result d
                # d = (local prediction - y) + sum of passive partial predictions;
                # this is the per-sample residual of the joint model.
                logger.info("Calculate intermediate result d.")
                pred_rest_trainer = reduce(lambda x, y: x + y, pred_trainer)
                if self.encryption_method == "ckks":
                    enc_pred_residual = ts.ckks_vector(public_context,
                                                       pred_residual.numpy().astype(np.float32).flatten())
                elif self.encryption_method == "paillier":
                    enc_pred_residual = Paillier.encrypt(public_context,
                                                         pred_residual.numpy().astype(np.float32).flatten(),
                                                         precision=self.encryption_config[self.encryption_method][
                                                             "precision"], obfuscation=True, num_cores=num_cores)
                elif self.encryption_method == "plain":
                    enc_pred_residual = pred_residual.numpy().astype(np.float32).flatten()
                enc_d = enc_pred_residual + pred_rest_trainer
                # send intermediate result d to trainer
                logger.info("Send intermediate result d to trainer.")
                for party_id in FedConfig.get_trainer():
                    if self.encryption_method == "ckks":
                        self.dual_channels["intermediate_label_trainer"][party_id].send(enc_d.serialize(),
                                                                                        use_pickle=False)
                    elif self.encryption_method == "paillier":
                        self.dual_channels["intermediate_label_trainer"][party_id].send(Paillier.serialize(enc_d),
                                                                                        use_pickle=False)
                    elif self.encryption_method == "plain":
                        self.dual_channels["intermediate_label_trainer"][party_id].send(enc_d)
                # calculate gradient for label_trainer
                logger.info("Calculate gradients for label_trainer.")
                if self.encryption_method == "ckks":
                    enc_regular_gradient_tmp = ts.ckks_vector(public_context,
                                                              regular_gradient_tmp.numpy().astype(np.float32).flatten())
                elif self.encryption_method == "paillier":
                    enc_regular_gradient_tmp = Paillier.encrypt(
                        public_context, regular_gradient_tmp.numpy().astype(np.float32).flatten(),
                        precision=self.encryption_config[self.encryption_method]["precision"],
                        obfuscation=True, num_cores=num_cores)
                elif self.encryption_method == "plain":
                    enc_regular_gradient_tmp = regular_gradient_tmp.numpy().astype(np.float32).flatten()
                # grad_w = d^T X + regularizer gradient; grad_b = d (summed later).
                if self.encryption_method == "ckks":
                    gradient_label_trainer_w = enc_d.matmul(x_batch.numpy()) + enc_regular_gradient_tmp
                else:
                    gradient_label_trainer_w = np.matmul(enc_d.reshape(1, len(enc_d)), x_batch.numpy()
                                                         ) + enc_regular_gradient_tmp
                gradient_label_trainer_b = enc_d
                if self.encryption_method == "ckks":
                    # add noise to encrypted gradients and send to assist_trainer
                    logger.info("Calculate noised gradients for label_trainer.")
                    # Mask in roughly [-335.5, 335.5] after the /100000 scaling,
                    # drawn from a CSPRNG so the assist trainer cannot recover
                    # the true gradient from the decrypted value.
                    noise = np.array([rng.randint(1 << 24, 1 << 26) - (1 << 25) for _ in range(x_batch.shape[1])],
                                     dtype=np.float32)
                    noise /= 100000
                    noise_b = np.array([rng.randint(1 << 24, 1 << 26) - (1 << 25) for _ in range(x_batch.shape[0])],
                                       dtype=np.float32)
                    noise_b /= 100000
                    noised_gradient_label_trainer_w = gradient_label_trainer_w + noise
                    noised_gradient_label_trainer_b = gradient_label_trainer_b + noise_b
                    logger.info("Send noised gradient to assist_trainer.")
                    self.dual_channels["gradients_loss"].send(noised_gradient_label_trainer_w.serialize(),
                                                              use_pickle=False)
                    self.dual_channels["gradients_loss"].send(noised_gradient_label_trainer_b.serialize(),
                                                              use_pickle=False)
                    # receive decrypted gradient from assist_trainer
                    logger.info("Receive decrypted gradient from assist_trainer.")
                    noised_decrypt_gradient = self.dual_channels["gradients_loss"].recv()
                    noised_decrypt_gradient_label_trainer_w = noised_decrypt_gradient["noised_gradient_label_trainer_w"]
                    noised_decrypt_gradient_label_trainer_b = noised_decrypt_gradient["noised_gradient_label_trainer_b"]
                    # The assist trainer returns sum(grad_b + noise_b), so only
                    # the sum of the mask is subtracted for the bias gradient.
                    gradient_label_trainer_w = noised_decrypt_gradient_label_trainer_w - noise
                    gradient_label_trainer_b = noised_decrypt_gradient_label_trainer_b - np.sum(noise_b)
                elif self.encryption_method == "paillier":
                    # add noise to encrypted gradients and send to assist_trainer
                    logger.info("Calculate noised gradients for label_trainer.")
                    noise = np.array([rng.randint(1 << 24, 1 << 26) - (1 << 25) for _ in range(x_batch.shape[1])],
                                     dtype=np.float32)
                    noise /= 100000
                    noise_b = np.array([rng.randint(1 << 24, 1 << 26) - (1 << 25) for _ in range(x_batch.shape[0])],
                                       dtype=np.float32)
                    noise_b /= 100000
                    noised_gradient_label_trainer_w = gradient_label_trainer_w + noise
                    noised_gradient_label_trainer_b = gradient_label_trainer_b + noise_b
                    logger.info("Send noised gradient to assist_trainer.")
                    self.dual_channels["gradients_loss"].send(Paillier.serialize(noised_gradient_label_trainer_w),
                                                              use_pickle=False)
                    self.dual_channels["gradients_loss"].send(Paillier.serialize(noised_gradient_label_trainer_b),
                                                              use_pickle=False)
                    # receive decrypted gradient from assist_trainer
                    logger.info("Receive decrypted gradient from assist_trainer.")
                    noised_decrypt_gradient = self.dual_channels["gradients_loss"].recv()
                    noised_decrypt_gradient_label_trainer_w = noised_decrypt_gradient["noised_gradient_label_trainer_w"]
                    noised_decrypt_gradient_label_trainer_b = noised_decrypt_gradient["noised_gradient_label_trainer_b"]
                    gradient_label_trainer_w = noised_decrypt_gradient_label_trainer_w - noise
                    gradient_label_trainer_b = noised_decrypt_gradient_label_trainer_b - np.sum(noise_b)
                elif self.encryption_method == "plain":
                    gradient_label_trainer_b = gradient_label_trainer_b.sum()
                # update w and b of label_trainer
                gradient_label_trainer_w = gradient_label_trainer_w / x_batch.shape[0]
                gradient_label_trainer_b = gradient_label_trainer_b / x_batch.shape[0]
                logger.info("Update weights of label trainer.")
                self.model.linear.weight -= (torch.FloatTensor(gradient_label_trainer_w) * self.optimizer_config["lr"])
                self.model.linear.bias -= (gradient_label_trainer_b * self.optimizer_config["lr"])
                # calculate and update the progress of the training
                self.progress_calculator.cal_custom_progress(epoch, batch_idx+1)
            # NOTE(review): len(self.train) is presumably the number of training
            # samples (set in the base class) — confirm it matches the sum of
            # batch sizes accumulated into loss_epoch above.
            loss_epoch = loss_epoch * (1 / len(self.train))
            logger.info("Loss of {} epoch is {}".format(epoch, loss_epoch))
            # predict train and val results for metrics
            logger.info("Predict train weights of label trainer.")
            self.train_result = self.predict(self.train_dataloader)
            loss_train_met = {"loss": loss_epoch}
            self._calc_metrics(np.array(self.train_result[1], dtype=float), np.array(self.train_result[0]),
                               epoch, stage="train", loss=loss_train_met)
            logger.info("Predict val weights of label trainer.")
            self.val_result = self.predict(self.val_dataloader)
            val_residual = np.array(self.val_result[1]) - np.array(self.val_result[0])
            loss_val_met = {"loss": np.mean((val_residual ** 2) / 2)}  # no regular
            val_metrics = self._calc_metrics(np.array(self.val_result[1], dtype=float), np.array(self.val_result[0]),
                                             epoch, stage="val", loss=loss_val_met)
            # early stopping
            # Negated so that earlyStopping treats a smaller loss as "better".
            val_metrics["loss"] = - val_metrics["loss"]
            if self.early_stopping_config["patience"] > 0:
                early_stop_flag, best_model_flag = self.es(val_metrics)
            else:
                early_stop_flag, best_model_flag = False, True
            # update best model
            if best_model_flag:
                self.best_model = copy.deepcopy(self.model)
            # send flags to trainers
            for party_id in FedConfig.get_trainer():
                self.dual_channels["intermediate_label_trainer"][party_id].send(
                    [early_stop_flag, best_model_flag, self.early_stopping_config["patience"]], use_pickle=True)
            # if need to save results by epoch
            if self.save_frequency > 0 and epoch % self.save_frequency == 0:
                # ModelPreserver.save(save_dir=self.save_dir, model_name=self.save_model_name,
                #                     state_dict=self.model.state_dict(), epoch=epoch)
                self.save_model(epoch=epoch)
            # if early stopping, break
            if early_stop_flag:
                # update the progress of 100 to show the training is finished
                ProgressCalculator.finish_progress()
                break
        # save model for infer
        self.save_model(epoch=None)
        # if not early stopping, save probabilities and model
        self._save_prob()
        # calculate feature importance
        self._save_feature_importance(self.dual_channels)
def save_model(self, epoch: Optional[int] = None):
if not epoch:
save_model_config(stage_model_config=self.export_conf,
save_path=Path(self.save_dir))
if self.save_model_name:
ModelIO.save_torch_model(
state_dict=self.best_model.state_dict(),
save_dir=self.save_dir,
model_name=self.save_model_name,
meta_dict={},
epoch=epoch
)
if self.save_onnx_model_name:
ModelIO.save_torch_onnx(
model=self.best_model,
input_dim=(self.data_dim,),
save_dir=self.save_dir,
model_name=self.save_onnx_model_name,
epoch=epoch
)
def _save_prob(self):
if self.interaction_params.get("write_training_prediction"):
self._write_prediction(self.train_result[1], self.train_result[0], self.train_ids,
stage="train", final=True)
if self.interaction_params.get("write_validation_prediction"):
self._write_prediction(self.val_result[1], self.val_result[0], self.val_ids,
stage="val", final=True)
def check_data(self):
dim_channel = BroadcastChannel(name="check_data_com", ids=[FedConfig.node_id] + FedConfig.get_trainer())
n = self.data_dim
dims = dim_channel.collect()
for dim in dims:
n += dim
if n <= 0:
raise ValueError("Number of the feature is zero. Stop training.")
def _save_feature_importance(self, channel):
res = {"owner_id": [], "fid": [], "importance": []}
other_weight_list = []
for party_id in FedConfig.get_trainer():
other_weight_list.append(channel["intermediate_label_trainer"][party_id].recv(use_pickle=True))
for (owner_id, weights) in other_weight_list:
for fid, weight in enumerate(weights):
res["owner_id"].append(owner_id)
res["fid"].append(fid)
res["importance"].append(float(weight))
for fid, weight in enumerate(self.best_model.state_dict()["linear.weight"][0]):
res["owner_id"].append(FedNode.node_id)
res["fid"].append(fid)
res["importance"].append(float(weight))
res = pd.DataFrame(res).sort_values(by="importance", key=lambda col: np.abs(col), ascending=False)
res.to_csv(Path(self.save_dir, self.output["feature_importance"]["name"]), header=True, index=False,
float_format="%.6g")
| 26,235 | 57.825112 | 120 | py |
XFL | XFL-master/python/algorithm/framework/vertical/linear_regression/assist_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import tenseal as ts
from common.communication.gRPC.python.channel import BroadcastChannel, DualChannel
from common.crypto.paillier.paillier import Paillier
from common.utils.logger import logger
from service.fed_config import FedConfig
class VerticalLinearRegressionAssistTrainer(object):
    def __init__(self, *args, **kwargs):
        """Assist trainer (key holder / decryptor) for vertical linear regression.

        Opens a dual channel to every other party, receives the training
        schedule and encryption settings from the label trainer, generates
        the homomorphic key pair, and broadcasts the public key. The secret
        key never leaves this party.
        """
        self.broadcast_channel = BroadcastChannel(name="Public keys", root_id=FedConfig.get_assist_trainer())
        self.dual_channels = {"gradients_loss": {}}
        self.party_id_list = FedConfig.get_label_trainer() + FedConfig.get_trainer()
        for party_id in self.party_id_list:
            self.dual_channels["gradients_loss"][party_id] = DualChannel(name="gradients_loss_" + party_id,
                                                                         ids=[FedConfig.node_id, party_id])
        # Receive order must mirror the send sequence in the label trainer's
        # __init__: batch count, epochs, batch size, encryption config, method.
        self.batch_num = self.dual_channels["gradients_loss"][FedConfig.get_label_trainer()[0]].recv()
        self.global_epoch = self.dual_channels["gradients_loss"][FedConfig.get_label_trainer()[0]].recv()
        self.batch_size = self.dual_channels["gradients_loss"][FedConfig.get_label_trainer()[0]].recv()
        self.encryption_config = self.dual_channels["gradients_loss"][FedConfig.get_label_trainer()[0]].recv()
        self.encryption_method = self.dual_channels["gradients_loss"][FedConfig.get_label_trainer()[0]].recv()
        self.private_context = None
        self.public_context = None
        # send encryption key to all parties
        # NOTE(review): the public key is broadcast both here and again at the
        # start of fit(), while each receiver recv()s it only once —
        # presumably the channel buffers the extra message; confirm.
        if self.encryption_method == "ckks":
            self.private_context = ts.context(
                ts.SCHEME_TYPE.CKKS,
                poly_modulus_degree=self.encryption_config[self.encryption_method]["poly_modulus_degree"],
                coeff_mod_bit_sizes=self.encryption_config[self.encryption_method]["coeff_mod_bit_sizes"]
            )
            self.private_context.generate_galois_keys()
            self.private_context.generate_relin_keys()
            self.private_context.global_scale = 1 << self.encryption_config[self.encryption_method][
                "global_scale_bit_size"]
            # Serialize only the public parts; the secret key stays local.
            serialized_public_context = self.private_context.serialize(
                save_public_key=True,
                save_secret_key=False,
                save_galois_keys=True,
                save_relin_keys=True
            )
            logger.info("Broadcast ckks public keys.")
            self.public_context_ser = serialized_public_context
            self.broadcast_channel.broadcast(self.public_context_ser, use_pickle=False)
            logger.info("Broadcast completed.")
        elif self.encryption_method == "paillier":
            # -1 enables all cores for decryption when parallelize_on is set.
            self.num_cores = -1 if self.encryption_config[self.encryption_method]["parallelize_on"] else 1
            self.private_context = Paillier.context(self.encryption_config[self.encryption_method]["key_bit_size"],
                                                    djn_on=self.encryption_config[self.encryption_method]["djn_on"])
            logger.info("Broadcast paillier public keys.")
            self.public_context_ser = self.private_context.to_public().serialize()
            self.broadcast_channel.broadcast(self.public_context_ser, use_pickle=False)
            logger.info("Broadcast completed.")
        elif self.encryption_method == "plain":
            pass
        else:
            raise ValueError(f"Encryption method {self.encryption_method} not supported! Valid methods are 'paillier', "
                             f"'ckks', 'plain'.")
    def fit(self):
        """Serve decryption for one full training run.

        Per batch: decrypt the total loss for the label trainer, then decrypt
        the noise-masked gradients for every party and send them back. The
        additive masks applied by the senders prevent this party from seeing
        the true gradients.
        """
        # send encryption key to all parties
        # NOTE(review): the log message says "ckks" even when the method is
        # paillier; the broadcast itself is scheme-agnostic.
        if self.encryption_method in ["ckks", "paillier"]:
            logger.info("Broadcast ckks public keys.")
            self.broadcast_channel.broadcast(self.public_context_ser, use_pickle=False)
            logger.info("Broadcast completed.")
        # train
        for epoch in range(1, self.global_epoch + 1):
            for batch_idx in range(self.batch_num):
                # receive and decrypt total encrypted loss and send to label_trainer
                logger.info("Receive and decrypted total loss and send back to label_trainer.")
                if self.encryption_method == "ckks":
                    enc_loss_batch = self.dual_channels["gradients_loss"][FedConfig.get_label_trainer()[0]].recv(
                        use_pickle=False)
                    decrypted_loss_batch = ts.ckks_vector_from(self.private_context, enc_loss_batch).decrypt()[0]
                elif self.encryption_method == "paillier":
                    enc_loss_batch = self.dual_channels["gradients_loss"][FedConfig.get_label_trainer()[0]].recv(
                        use_pickle=False)
                    decrypted_loss_batch = Paillier.decrypt(self.private_context, Paillier.ciphertext_from(
                        None, enc_loss_batch), dtype='float', num_cores=self.num_cores)
                elif self.encryption_method == "plain":
                    enc_loss_batch = self.dual_channels["gradients_loss"][FedConfig.get_label_trainer()[0]].recv()
                    decrypted_loss_batch = enc_loss_batch
                self.dual_channels["gradients_loss"][FedConfig.get_label_trainer()[0]].send(decrypted_loss_batch)
                logger.info(
                    "Loss of {} batch {} epoch is {}".format(batch_idx, epoch, decrypted_loss_batch / self.batch_size))
                # receive encrypted noised gradients from other parties and decrypt and send back to other parties
                if self.encryption_method == "ckks" or self.encryption_method == "paillier":
                    # trainer
                    # Passive trainers each send one masked weight gradient.
                    for party_id in FedConfig.get_trainer():
                        en_noised_gradient_trainer_w = self.dual_channels["gradients_loss"][party_id].recv(
                            use_pickle=False)
                        if self.encryption_method == "ckks":
                            noised_gradient_trainer_w = ts.ckks_vector_from(self.private_context,
                                                                            en_noised_gradient_trainer_w).decrypt()
                        elif self.encryption_method == "paillier":
                            noised_gradient_trainer_w = Paillier.decrypt(self.private_context, Paillier.ciphertext_from(
                                None, en_noised_gradient_trainer_w), dtype='float', num_cores=self.num_cores)
                        self.dual_channels["gradients_loss"][party_id].send(noised_gradient_trainer_w)
                    # label_trainer
                    # The label trainer sends masked weight AND bias gradients.
                    en_noised_gradient_label_trainer_w = self.dual_channels["gradients_loss"][
                        FedConfig.get_label_trainer()[0]].recv(use_pickle=False)
                    en_noised_gradient_label_trainer_b = self.dual_channels["gradients_loss"][
                        FedConfig.get_label_trainer()[0]].recv(use_pickle=False)
                    if self.encryption_method == "ckks":
                        noised_gradient_label_trainer_w = ts.ckks_vector_from(
                            self.private_context, en_noised_gradient_label_trainer_w).decrypt()
                        noised_gradient_label_trainer_b = ts.ckks_vector_from(
                            self.private_context, en_noised_gradient_label_trainer_b).decrypt()
                    elif self.encryption_method == "paillier":
                        noised_gradient_label_trainer_w = Paillier.decrypt(
                            self.private_context, Paillier.ciphertext_from(None, en_noised_gradient_label_trainer_w),
                            dtype='float', num_cores=self.num_cores)
                        noised_gradient_label_trainer_b = Paillier.decrypt(
                            self.private_context, Paillier.ciphertext_from(None, en_noised_gradient_label_trainer_b),
                            dtype='float', num_cores=self.num_cores)
                    # calculate sum of gradient b
                    # The bias gradient is per-sample; the label trainer expects
                    # its sum (it subtracts the summed mask on its side).
                    noised_gradient_label_trainer_b = np.sum(noised_gradient_label_trainer_b)
                    grad_send = {"noised_gradient_label_trainer_w": noised_gradient_label_trainer_w,
                                 "noised_gradient_label_trainer_b": noised_gradient_label_trainer_b}
                    self.dual_channels["gradients_loss"][FedConfig.get_label_trainer()[0]].send(grad_send)
| 9,101 | 61.772414 | 120 | py |
XFL | XFL-master/python/algorithm/framework/vertical/linear_regression/__init__.py | 0 | 0 | 0 | py | |
XFL | XFL-master/python/algorithm/framework/vertical/linear_regression/trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import secrets
from pathlib import Path
from typing import Optional
import numpy as np
import tenseal as ts
import torch
from common.utils.utils import update_dict
from common.communication.gRPC.python.channel import BroadcastChannel, DualChannel
from common.crypto.paillier.paillier import Paillier
from common.utils.logger import logger
from common.utils.model_io import ModelIO
from service.fed_config import FedConfig
from service.fed_node import FedNode
from common.utils.utils import save_model_config
from .base import VerticalLinearRegressionBase
class VerticalLinearRegressionTrainer(VerticalLinearRegressionBase):
    def __init__(self, train_conf: dict, *args, **kwargs):
        """Passive-party (trainer) side of vertical linear regression.

        Receives the synced training config from the label trainer, builds
        the local bias-free model, and opens the communication channels.

        Args:
            train_conf (dict): training parameters; updated in place with the
                config section broadcast by the label trainer.
        """
        self.sync_channel = BroadcastChannel(name="sync")
        conf = self._sync_config()
        update_dict(train_conf, conf)
        super().__init__(train_conf, label=False, *args, **kwargs)
        # Passive parties hold only weights; the bias lives with the label trainer.
        self._init_model()
        self.export_conf = [{
            "class_name": "VerticalLinearRegression",
            "identity": self.identity,
            "filename": self.save_onnx_model_name,
            "input_dim": self.data_dim,
            "bias": False
        }]
        if self.random_seed:
            self.set_seed(self.random_seed)
        self.best_model = None
        # Channel on which the assist trainer broadcasts its public key.
        self.broadcast_channel = BroadcastChannel(name="Public keys", root_id=FedConfig.get_assist_trainer())
        # Trainer-to-trainer exchange is only needed for the pairwise Paillier
        # cross terms, and only when there is more than one passive trainer.
        if len(FedConfig.get_trainer()) > 1:
            self.broadcast_trainer = BroadcastChannel(name="Trainer exchange", root_id=FedConfig.node_id,
                                                      ids=FedConfig.get_trainer())
        self.dual_channels = {
            "intermediate_label_trainer": DualChannel(name="intermediate_label_trainer_" + FedConfig.node_id,
                                                      ids=FedConfig.get_label_trainer() + [FedConfig.node_id]),
            "gradients_loss": DualChannel(name="gradients_loss_" + FedConfig.node_id,
                                          ids=[FedConfig.get_assist_trainer()] + [FedConfig.node_id])
        }
def predict(self, input_data):
for batch_idx, x_batch in enumerate(input_data):
# calculate prediction of batch
pred_trainer = self.model(x_batch[0])
# send to label_trainer
self.dual_channels["intermediate_label_trainer"].send(pred_trainer.numpy().astype(np.float32).flatten(),
use_pickle=True)
def _sync_config(self):
config = self.sync_channel.recv()
return config
    def fit(self):
        """Run the passive-party training loop of vertical linear regression.

        Per batch: send (possibly encrypted) partial predictions and losses to
        the label trainer, receive the joint residual ``d`` back, compute the
        masked local weight gradient, have the assist trainer decrypt it, and
        update the local weights. Message order must stay in sync with the
        label trainer's and assist trainer's ``fit`` loops.

        Raises:
            ValueError: if the configured encryption method is not one of
                'ckks', 'paillier', 'plain'.
        """
        self.check_data()
        num_cores = -1
        encryption_config = self.encryption_config
        # First key of encryption_config selects the scheme.
        encryption_method = list(self.encryption_config.keys())[0].lower()
        logger.info("Vertical linear regression training start")
        # receive encryption key from assist trainer
        public_context = None
        if encryption_method == "ckks":
            logger.info("Receive ckks public key.")
            public_context = self.broadcast_channel.recv(use_pickle=False)
            public_context = ts.context_from(public_context)
            logger.info("Public key received.")
        elif encryption_method == "paillier":
            logger.info("Receive paillier public key.")
            public_context = self.broadcast_channel.recv(use_pickle=False)
            public_context = Paillier.context_from(public_context)
            logger.info("Public key received.")
        elif encryption_method == "plain":
            pass
        else:
            raise ValueError(
                f"Encryption method {encryption_method} not supported! Valid methods are 'paillier', 'ckks', 'plain'.")
        # Cryptographically secure RNG for the additive gradient masks.
        rng = secrets.SystemRandom()
        # train
        for epoch in range(1, self.global_epoch + 1):
            for batch_idx, x_batch in enumerate(self.train_dataloader):
                regular_loss_tmp = 0
                regular_gradient_tmp = 0
                enc_regular_gradient_tmp = 0
                # calculate regular results
                # p selects the penalty: 1 -> L1, 2 -> L2, 0 -> none.
                # NOTE(review): |w|/w is sign(w) but divides by zero for
                # weights that are exactly 0 — confirm this cannot occur.
                if self.optimizer_config['p'] == 1:
                    regular_loss_tmp = torch.abs(self.model.linear.weight).sum() * self.optimizer_config['alpha']
                    regular_gradient_tmp = self.optimizer_config['alpha'] * (torch.abs(self.model.linear.weight)
                                                                             / self.model.linear.weight)
                elif self.optimizer_config['p'] == 2:
                    regular_loss_tmp = (self.model.linear.weight ** 2).sum() * self.optimizer_config['alpha'] / 2
                    regular_gradient_tmp = self.optimizer_config['alpha'] * self.model.linear.weight
                elif self.optimizer_config['p'] == 0:
                    pass
                # compute theta_trainer * x_trainer and loss of x_trainer
                pred_trainer = self.model(x_batch[0])
                square_tmp = (pred_trainer ** 2).sum() / 2
                loss_trainer = square_tmp + regular_loss_tmp
                # send intermediate results to label trainer.
                # Order (prediction, then loss) must match the label trainer's
                # recv sequence.
                logger.info("Send intermediate result to label trainer.")
                enc_pred_trainer = None
                if encryption_method == "ckks":
                    enc_pred_trainer = ts.ckks_vector(public_context, pred_trainer.numpy().astype(np.float32).flatten())
                    enc_loss_trainer = ts.ckks_vector(public_context, loss_trainer.numpy().astype(np.float32).flatten())
                    self.dual_channels["intermediate_label_trainer"].send(enc_pred_trainer.serialize(),
                                                                          use_pickle=False)
                    self.dual_channels["intermediate_label_trainer"].send(enc_loss_trainer.serialize(),
                                                                          use_pickle=False)
                elif encryption_method == "paillier":
                    enc_pred_trainer = Paillier.encrypt(public_context,
                                                        pred_trainer.numpy().astype(np.float32).flatten(),
                                                        precision=encryption_config[encryption_method]["precision"],
                                                        obfuscation=True,
                                                        num_cores=num_cores)
                    enc_loss_trainer = Paillier.encrypt(public_context,
                                                        loss_trainer.numpy().astype(np.float32).flatten(),
                                                        precision=encryption_config[encryption_method]["precision"],
                                                        obfuscation=True,
                                                        num_cores=num_cores)
                    self.dual_channels["intermediate_label_trainer"].send(Paillier.serialize(enc_pred_trainer),
                                                                          use_pickle=False)
                    self.dual_channels["intermediate_label_trainer"].send(Paillier.serialize(enc_loss_trainer),
                                                                          use_pickle=False)
                elif encryption_method == "plain":
                    # NOTE(review): in plain mode raw partial predictions leave
                    # this party unencrypted — acceptable only for debugging.
                    enc_pred_trainer = pred_trainer.numpy().astype(np.float32).flatten()
                    enc_loss_trainer = loss_trainer.numpy().astype(np.float32).flatten()
                    self.dual_channels["intermediate_label_trainer"].send(enc_pred_trainer, use_pickle=True)
                    self.dual_channels["intermediate_label_trainer"].send(enc_loss_trainer, use_pickle=True)
                # exchange theta_trainer * x_trainer to calculate loss_between_trainer when encryption is paillier
                logger.info("Calculate trainer_sum to label trainer when encryption is paillier.")
                if encryption_method == "paillier" and len(FedConfig.get_trainer()) > 1:
                    trainer_sum = 0
                    logger.info("Send intermediate result to other trainers when encryption is paillier.")
                    self.broadcast_trainer.broadcast(Paillier.serialize(enc_pred_trainer), use_pickle=False)
                    logger.info("Receive intermediate result from other trainers when encryption is paillier.")
                    trainer_tmp = self.broadcast_trainer.collect(use_pickle=False)
                    for trainer_u in trainer_tmp:
                        trainer_u = Paillier.ciphertext_from(public_context, trainer_u)
                        trainer_sum += np.sum(trainer_u * pred_trainer.numpy().astype(np.float32).flatten())
                    logger.info("Send trainer_sum to label trainer when encryption is paillier.")
                    self.dual_channels["intermediate_label_trainer"].send(Paillier.serialize(trainer_sum),
                                                                          use_pickle=False)
                # receive intermediate result d from label_trainer
                # d is the joint per-sample residual computed by the label trainer.
                logger.info("Receive intermediate result d from label_trainer.")
                if encryption_method == "ckks":
                    enc_d = self.dual_channels["intermediate_label_trainer"].recv(use_pickle=False)
                    enc_d = ts.ckks_vector_from(public_context, enc_d)
                elif encryption_method == "paillier":
                    enc_d = self.dual_channels["intermediate_label_trainer"].recv(use_pickle=False)
                    enc_d = Paillier.ciphertext_from(public_context, enc_d)
                elif encryption_method == "plain":
                    enc_d = self.dual_channels["intermediate_label_trainer"].recv()
                # calculate gradient for trainer and send to assist_trainer
                logger.info("Calculate gradients for trainer.")
                if encryption_method == "ckks":
                    enc_regular_gradient_tmp = ts.ckks_vector(public_context,
                                                              regular_gradient_tmp.numpy().astype(np.float32).flatten())
                elif encryption_method == "paillier":
                    enc_regular_gradient_tmp = Paillier.encrypt(
                        public_context, regular_gradient_tmp.numpy().astype(np.float32).flatten(),
                        precision=encryption_config[encryption_method]["precision"],
                        obfuscation=True, num_cores=num_cores)
                elif encryption_method == "plain":
                    enc_regular_gradient_tmp = regular_gradient_tmp.numpy().astype(np.float32).flatten()
                # grad_w = d^T X + regularizer gradient.
                if encryption_method == "ckks":
                    gradient_trainer_w = enc_d.matmul(x_batch[0].numpy()) + enc_regular_gradient_tmp
                else:
                    gradient_trainer_w = np.matmul(enc_d.reshape(1, len(enc_d)), x_batch[0].numpy()
                                                   ) + enc_regular_gradient_tmp
                # add noise to encrypted gradients and send to assist_trainer
                if encryption_method == "ckks":
                    logger.info("Calculate noised gradient for trainer.")
                    # CSPRNG mask (~[-335.5, 335.5] after scaling) hides the true
                    # gradient from the decrypting assist trainer.
                    noise = np.array([rng.randint(1 << 24, 1 << 26) - (1 << 25) for _ in range(x_batch[0].shape[1])],
                                     dtype=np.float32)
                    noise /= 100000
                    noised_gradient_trainer_w = gradient_trainer_w + noise
                    logger.info("Send noised gradient to assist_trainer.")
                    self.dual_channels["gradients_loss"].send(noised_gradient_trainer_w.serialize(), use_pickle=False)
                    # receive decrypted gradient from assist_trainer
                    logger.info("Receive decrypted gradient from assist_trainer.")
                    noised_gradient_trainer_w = self.dual_channels["gradients_loss"].recv()
                    gradient_trainer_w = noised_gradient_trainer_w - noise
                elif encryption_method == "paillier":
                    logger.info("Calculate noised gradient for trainer.")
                    noise = np.array([rng.randint(1 << 24, 1 << 26) - (1 << 25) for _ in range(x_batch[0].shape[1])],
                                     dtype=np.float32)
                    noise /= 100000
                    noised_gradient_trainer_w = gradient_trainer_w + noise
                    logger.info("Send noised gradient to assist_trainer.")
                    self.dual_channels["gradients_loss"].send(Paillier.serialize(noised_gradient_trainer_w),
                                                              use_pickle=False)
                    # receive decrypted gradient from assist_trainer
                    logger.info("Receive decrypted gradient from assist_trainer.")
                    noised_gradient_trainer_w = self.dual_channels["gradients_loss"].recv()
                    gradient_trainer_w = noised_gradient_trainer_w - noise
                # gradient_trainer_w = torch.FloatTensor(gradient_trainer_w).unsqueeze(-1)
                # update w and b of trainer
                gradient_trainer_w = gradient_trainer_w / x_batch[0].shape[0]
                logger.info("Update weights of trainer.")
                self.model.linear.weight -= (torch.FloatTensor(gradient_trainer_w) * self.optimizer_config["lr"])
            # predict train and val for metrics
            # Streams partial predictions; metrics are computed label-trainer-side.
            logger.info("Predict train weights of trainer.")
            self.predict(self.train_dataloader)
            logger.info("Predict val weights of trainer.")
            self.predict(self.val_dataloader)
            # receive flags
            # Flags come from the label trainer's early-stopping decision.
            early_stop_flag, best_model_flag, patient = self.dual_channels["intermediate_label_trainer"].recv(
                use_pickle=True)
            # update best model
            if best_model_flag:
                self.best_model = copy.deepcopy(self.model)
            # if need to save results by epoch
            if self.save_frequency > 0 and epoch % self.save_frequency == 0:
                # ModelPreserver.save(save_dir=self.save_dir,
                #                     model_name=self.save_model_name,
                #                     state_dict=self.model.state_dict(),
                #                     epoch=epoch)
                self.save_model(epoch=epoch)
            # if early stopping, break
            if early_stop_flag:
                break
        # save model for infer
        # if not early stopping, save model
        self.save_model(epoch=None)
        # send w to label trainer
        self._save_feature_importance(self.dual_channels["intermediate_label_trainer"])
def save_model(self, epoch: Optional[int] = None):
if not epoch:
save_model_config(stage_model_config=self.export_conf,
save_path=Path(self.save_dir))
if self.save_model_name:
ModelIO.save_torch_model(
state_dict=self.best_model.state_dict(),
save_dir=self.save_dir,
model_name=self.save_model_name,
meta_dict={},
epoch=epoch
)
if self.save_onnx_model_name:
ModelIO.save_torch_onnx(
model=self.best_model,
input_dim=(self.data_dim,),
save_dir=self.save_dir,
model_name=self.save_onnx_model_name,
epoch=epoch
)
def _save_feature_importance(self, channel):
channel.send((FedNode.node_id, self.best_model.state_dict()["linear.weight"][0]))
def check_data(self):
dim_channel = BroadcastChannel(name="check_data_com", ids=[FedConfig.node_id] + FedConfig.get_trainer())
dim_channel.send(self.data_dim)
| 16,611 | 54.373333 | 120 | py |
XFL | XFL-master/python/algorithm/framework/vertical/kmeans/base.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import functools
from itertools import chain
from pathlib import Path
import warnings
import numpy as np
import pandas as pd
import pyspark.pandas as ps
import torch
from common.utils.config_parser import TrainConfigParser
from common.utils.logger import logger
warnings.filterwarnings("ignore")
class VerticalKmeansBase(TrainConfigParser):
    """Shared state and math helpers for vertical federated K-means parties.

    Loads this party's slice of the training data, parses hyper-parameters
    (k, max_iter, tol, init, encryption, ...), and provides the distance and
    centroid computations used by both trainer roles.
    """
    def __init__(self, train_conf: dict, label: bool = False, *args, **kwargs):
        """
        Initialize defaults, then load data and hyper-parameters.
        Args:
            train_conf: parsed training configuration (consumed by TrainConfigParser).
            label: whether this party holds the label column (may be overwritten
                by the dataset config in _init_data).
            *args:
            **kwargs:
        """
        super().__init__(train_conf)
        # Defaults below are overwritten by _init_config() from train_params.
        self.k = 0
        self.max_iter = 0
        self.tol = 0.0
        self.is_converged = False
        self.init = "random"
        self.encryption = "plain"
        self.label = label
        self.cluster_centers = []
        self.cluster_count_list = []
        self.train_features, self.train_label, self.train_ids = None, None, None
        self._init_data()
        self._init_config()
    def _init_data(self):
        """Load the first configured trainset (csv only) into
        train_features / train_label / train_ids."""
        logger.info("init data loader.")
        if not self.input_trainset:
            return None
        input_info = self.input_trainset[0]
        file_path = str(Path(input_info.get("path"), input_info.get("name")))
        type_ = input_info.get("type", "None")
        if input_info.get("has_id", True):
            index_col = input_info.get("index_col", 'id')
        else:
            index_col = None
        if input_info.get("has_label", True):
            label_name = input_info.get("label_name", 'y')
            self.label = True
        else:
            label_name = None
            self.label = False
        if type_ == "csv":
            # Computing engine decides the dataframe backend (pandas vs pyspark.pandas).
            if self.computing_engine == "local":
                df = pd.read_csv(file_path, index_col=index_col)
            elif self.computing_engine == "spark":
                df = ps.read_csv(file_path, index_col=index_col)
            else:
                raise NotImplementedError("Computing engine {} is not supported.".format(self.computing_engine))
        else:
            raise NotImplementedError("Dataset type {} is not supported.".format(type_))
        if self.label:
            # Exclude the label column from the feature matrix.
            feature_cols = [_ for _ in df.columns if _ != label_name]
            self.train_features = df[feature_cols]
            if label_name:
                self.train_label = df[label_name]
            else:
                self.train_label = None
        else:
            self.train_features = df
        self.train_ids = df.index
    def _init_config(self):
        """
        Initialize model hyper-parameters from train_info["train_params"].
        Returns:
        """
        params = self.train_info.get("train_params", {})
        self.k = params.get("k", 5)
        self.init = params.get("init", "random")
        self.max_iter = params.get("max_iter", 20)
        self.tol = params.get("tol", 1e-5)
        self.random_seed = params.get("extra_config", {}).get("random_seed", 2022)
        # The assist trainer holds no local data, so only data parties validate.
        if self.identity != "assist_trainer":
            self._check()
        self.encryption = params.get("encryption")
    def _check(self):
        """
        Validate that the local data and k are usable.
        Returns:
        """
        if len(self.train_features) <= 0:
            raise ValueError("error: empty dataset.")
        if self.k < 2:
            raise ValueError("k must be an integer value larger than 1.")
        elif self.k > len(self.train_features):
            raise ValueError("k is larger than the size of current data.")
    @staticmethod
    def euclid_distance(u, center_list):
        # Squared Euclidean distance from sample u to every center (no sqrt).
        result = []
        for i in range(len(center_list)):
            result.append(sum(np.square(center_list[i] - u)))
        return result
    def distance_table(self, centers):
        """
        Compute squared distances from every local sample to every center.
        Args:
            centers: cluster centroids
        Returns:
            (n * k) tensor, whose [i, j] element is square of the distance of sample i to the centroid j.
        """
        # Normalize centers to a numpy array regardless of backend container.
        if isinstance(centers, ps.DataFrame):
            centers = centers.to_numpy()
        elif isinstance(centers, pd.DataFrame):
            centers = centers.to_numpy()
        elif isinstance(centers, list):
            centers = np.array(centers)
        n = len(self.train_features)
        if self.train_features.empty:
            return
        d = functools.partial(self.euclid_distance, center_list=centers)
        dt = self.train_features.apply(d, axis=1)
        # Flatten the per-row lists, then reshape to (n, k).
        return torch.Tensor(list(chain.from_iterable(dt.to_numpy()))).reshape(n, len(centers))
    @staticmethod
    def distance_between_centers(center_list):
        # Squared distances between every ordered pair (i, j), i != j,
        # flattened into a 1-D tensor of length k*(k-1).
        cluster_dist_list = []
        for i in range(0, len(center_list)):
            for j in range(0, len(center_list)):
                if j != i:
                    cluster_dist_list.append(np.sum((np.array(center_list[i]) - np.array(center_list[j])) ** 2))
        return torch.Tensor(cluster_dist_list)
    def calc_centers(self, centers, cluster_result):
        """
        Update cluster centers based on clustering results.
        Args:
            centers: current center slice (kept for clusters with no members)
            cluster_result: result of clustering labels
        Returns:
            list of k new center vectors
        """
        feature_sum = {}
        feature_count = {}
        # Accumulate per-cluster feature sums and member counts in one pass.
        for feature, label in zip(self.train_features.values, cluster_result):
            if label not in feature_sum:
                feature_sum[label] = copy.deepcopy(feature)
            else:
                feature_sum[label] += feature
            feature_count[label] = feature_count.get(label, 0) + 1
        center_list = []
        # for k in centroid_feature_sum:
        for k in range(self.k):
            if k not in feature_sum:
                # Empty cluster: keep its previous center unchanged.
                if isinstance(centers, ps.DataFrame):
                    center_list.append(centers.iloc[k])
                elif isinstance(centers, pd.DataFrame):
                    center_list.append(centers.iloc[k])
                elif isinstance(centers, list):
                    center_list.append(centers[k])
                else:
                    raise NotImplementedError
            else:
                count = feature_count[k]
                center_list.append(feature_sum[k] / count)
        return center_list
    def calc_cluster_count(self, cluster_result):
        """
        Count cluster membership.
        Args:
            cluster_result: result of clustering labels
        Returns:
            list of [cluster_id, member_count, member_ratio] per cluster
        """
        feature_count = {}
        for label in cluster_result:
            feature_count[label] = feature_count.get(label, 0) + 1
        cluster_count_list = []
        count_all = len(cluster_result)
        for k in range(self.k):
            if k not in feature_count:
                cluster_count_list.append([k, 0, 0])
            else:
                count = feature_count[k]
                cluster_count_list.append([k, count, count / count_all])
        return cluster_count_list
    @staticmethod
    def calc_tolerance(centers, centers_new):
        """
        Calculate the convergence metric: total squared movement of all centers.
        Returns:
        """
        if isinstance(centers, ps.DataFrame):
            centers = centers.to_numpy()
        elif isinstance(centers, pd.DataFrame):
            centers = centers.to_numpy()
        elif isinstance(centers, list):
            centers = np.array(centers)
        return np.sum(np.sum((centers - np.array(centers_new)) ** 2, axis=1))
| 6,602 | 26.627615 | 100 | py |
XFL | XFL-master/python/algorithm/framework/vertical/kmeans/table_agg_otp.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
from functools import reduce
from itertools import combinations
from typing import Dict, OrderedDict, Tuple
import numpy as np
import pandas as pd
from common.communication.gRPC.python.commu import Commu
from common.crypto.csprng.drbg import get_drbg_inst
from common.crypto.csprng.drbg_base import DRBGBase
from common.crypto.key_agreement.diffie_hellman import DiffieHellman
from common.crypto.one_time_pad.component import OneTimePadCiphertext, OneTimePadContext, OneTimeKey
from common.crypto.one_time_pad.one_time_add import OneTimeAdd
from .table_agg_base import TableAggregatorAbstractAssistTrainer, TableAggregatorAbstractTrainer
def split_bytes(x: bytes, out_shape: Tuple[int]):
    """Recursively split a byte string into a nested list of big-endian ints.

    The bytes are cut into equal chunks along each dimension of ``out_shape``;
    an empty shape yields a single integer for the whole buffer.
    """
    if not out_shape:
        return int.from_bytes(x, 'big')
    chunk = len(x) // out_shape[0]
    pieces = [x[chunk * i: chunk * (i + 1)] for i in range(out_shape[0])]
    if len(out_shape) == 1:
        return [int.from_bytes(p, 'big') for p in pieces]
    return [split_bytes(p, out_shape[1:]) for p in pieces]
class TableAggregatorOTPTrainer(TableAggregatorAbstractTrainer):
    """Trainer-side aggregator that masks its table with one-time pads.

    Each pair of trainers derives a shared secret via Diffie-Hellman and seeds
    a CSPRNG from it, so pairwise pads cancel when the assist trainer sums the
    ciphertexts.
    """
    def __init__(self, sec_conf: dict, trainer_ids: list, *args, **kwargs) -> None:
        super().__init__(sec_conf=sec_conf, *args, **kwargs)
        trainer_pairs = combinations(trainer_ids, 2)
        # key exchange: run Diffie-Hellman with every other trainer this node pairs with
        key_exchange_conf = sec_conf["key_exchange"]
        df_protocols: Dict[str, DiffieHellman] = {}
        for _trainer_ids in trainer_pairs:
            if Commu.node_id in _trainer_ids:
                df_protocol = DiffieHellman(list(_trainer_ids),
                                            key_bitlength=key_exchange_conf['key_bitlength'],
                                            optimized=key_exchange_conf["optimized"],
                                            channel_name="otp_diffie_hellman")
                df_protocols[df_protocol.chan.remote_id] = df_protocol
        entropys: Dict[str, bytes] = {remote_id: None for remote_id in df_protocols}
        def func(id):
            # Each exchange blocks on network I/O, so they run in parallel threads.
            entropys[id] = df_protocols[id].exchange(out_bytes=True)
        thread_list = []
        for id in df_protocols:
            task = threading.Thread(target=func, args=(id,))
            thread_list.append(task)
        for task in thread_list:
            task.start()
        for task in thread_list:
            task.join()
        # Seed one deterministic CSPRNG per peer from the shared entropy.
        csprng_conf = sec_conf["csprng"]
        self.csprngs: OrderedDict[str, DRBGBase] = OrderedDict()
        self.is_addition = []
        for remote_id in trainer_ids:
            if remote_id != Commu.node_id:
                self.csprngs[remote_id] = get_drbg_inst(name=csprng_conf["name"],
                                                        entropy=entropys[remote_id],
                                                        method=csprng_conf["method"],
                                                        nonce=b'',
                                                        additional_data=b'')
                # Pad sign is fixed by node-id ordering so the two peers of a
                # pair apply opposite signs and their pads cancel in the sum.
                self.is_addition.append(Commu.node_id < remote_id)
        # one-time-pad
        self.otp_context = OneTimePadContext(modulus_exp=sec_conf["key_bitlength"],
                                             data_type=sec_conf["data_type"])
    def send(self, table: pd.Series) -> None:
        """
        Mask the local table with per-peer one-time pads and broadcast it.
        Args:
            table: local table to contribute; None is forwarded as-is.
        Returns:
        """
        def f(t):
            # Number of pad bytes needed for element t: prod(shape) * bits / 8.
            return reduce(lambda x, y: x * y, t.shape, 1) * self.otp_context.modulus_exp // 8
        if table is None:
            self.broadcast_chan.send(value=None)
            return
        num_bytes_array = list(map(f, table))
        csprng_generators = []
        for remote_id in self.csprngs:
            generator = self.csprngs[remote_id].generator(num_bytes=num_bytes_array,
                                                          additional_data=b'')
            csprng_generators.append(generator)
        one_time_key = []
        for g in csprng_generators:
            # Draw pad bytes and reinterpret them with the table's shape.
            x = bytearray(next(g))
            y = split_bytes(x, table.shape)
            one_time_key.append(np.array(y))
        one_time_key = OneTimeKey(one_time_key, self.otp_context.modulus_exp)
        encrypted_table = OneTimeAdd.encrypt(context_=self.otp_context,
                                             data=table,
                                             one_time_key=one_time_key,
                                             is_addition=self.is_addition,
                                             serialized=False).data
        self.broadcast_chan.send(value=encrypted_table)
class TableAggregatorOTPAssistTrainer(TableAggregatorAbstractAssistTrainer):
    """Assist-trainer-side aggregator for one-time-pad masked tables."""
    def __init__(self, sec_conf: dict, *args, **kwargs) -> None:
        super().__init__(sec_conf=sec_conf, *args, **kwargs)
        # Context must match the trainers' (same modulus / data type) so the
        # pairwise pads cancel out when the ciphertexts are summed.
        self.otp_context = OneTimePadContext(modulus_exp=sec_conf["key_bitlength"],
                                             data_type=sec_conf["data_type"])
    def aggregate(self) -> pd.Series:
        """Sum the encrypted tables collected from all parties and decode.

        Returns:
            The decoded plaintext sum, or None when no party contributed a
            table (consistent with the plain aggregator's behavior).
        """
        message = self.broadcast_chan.collect()
        ciphertext = None
        for table in message:
            if table is None:
                continue
            if ciphertext is None:
                ciphertext = OneTimePadCiphertext(data=table, context_=self.otp_context)
            else:
                ciphertext += OneTimePadCiphertext(data=table, context_=self.otp_context)
        if ciphertext is None:
            # Every party sent None: previously this fell through to
            # ciphertext.decode() and raised AttributeError.
            return None
        return ciphertext.decode()
| 6,067 | 39.18543 | 100 | py |
XFL | XFL-master/python/algorithm/framework/vertical/kmeans/table_agg_plain.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import pandas as pd
from .table_agg_base import TableAggregatorAbstractAssistTrainer
from .table_agg_base import TableAggregatorAbstractTrainer
class TableAggregatorPlainTrainer(TableAggregatorAbstractTrainer):
    """Trainer-side aggregator that sends its table in plaintext."""
    def __init__(self, sec_conf: dict, *args, **kwargs) -> None:
        super().__init__(sec_conf=sec_conf, *args, **kwargs)
    def send(self, table: pd.Series) -> None:
        """
        Broadcast the local table without any masking or encryption.
        Args:
            table: local table to contribute; may be None.
        Returns:
        """
        self.broadcast_chan.send(value=table)
class TableAggregatorPlainAssistTrainer(TableAggregatorAbstractAssistTrainer):
    """Assist-trainer-side aggregator that sums plaintext tables."""
    def __init__(self, sec_conf: dict, *args, **kwargs) -> None:
        super().__init__(sec_conf=sec_conf, *args, **kwargs)
    def aggregate(self) -> pd.Series:
        """Collect every party's table and return their element-wise sum.

        Parties that sent None are skipped; returns None when no party
        contributed a table.
        """
        tables = [t for t in self.broadcast_chan.collect() if t is not None]
        if not tables:
            return None
        # Deep-copy the first table so the in-place += below never mutates
        # the object received from the channel.
        total = copy.deepcopy(tables[0])
        for t in tables[1:]:
            total += t
        return total
| 1,684 | 29.636364 | 78 | py |
XFL | XFL-master/python/algorithm/framework/vertical/kmeans/label_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict
import numpy as np
from service.fed_config import FedConfig
from service.fed_node import FedNode
from common.communication.gRPC.python.channel import DualChannel
from .trainer import VerticalKmeansTrainer
class VerticalKmeansLabelTrainer(VerticalKmeansTrainer):
    """Label-holding trainer: behaves like a regular trainer but also picks
    the initial centers and distributes the chosen ids to the other trainers."""
    def __init__(self, train_conf: dict, *args, **kwargs):
        """
        Args:
            train_conf: training parameters
            *args:
            **kwargs:
        """
        super().__init__(train_conf, *args, **kwargs)
    def init_centers(self):
        # One dedicated channel per (non-label) trainer to ship the chosen ids.
        init_center_chan: Dict[str, DualChannel] = {}
        for party_id in FedConfig.get_trainer():
            init_center_chan[party_id] = DualChannel(
                name="init_center_" + party_id, ids=[FedNode.node_id, party_id]
            )
        self.channels["init_center"] = init_center_chan
        if self.init == "random":
            # Sample k distinct row indices and broadcast them.
            center_ids = list(np.random.choice(len(self.train_features), self.k, replace=False))
            for party_id in FedConfig.get_trainer():
                self.channels["init_center"][party_id].send(center_ids)
            return center_ids
        elif self.init == "kmeans++":
            self.channels["init_prob"] = DualChannel(name="init_prob",
                                                     ids=[FedConfig.get_assist_trainer(), FedNode.node_id])
            center_ids = []
            while len(center_ids) < self.k:
                m = len(self.train_features)
                if len(center_ids) < 1:
                    # First center is uniform random.
                    center_ids.append(np.random.choice(m))
                else:
                    # Send local distances; assist trainer returns the global
                    # sampling weights for the next center (kmeans++ rule).
                    dist_table = self.distance_table(self.train_features.iloc[center_ids])
                    self.table_agg_executor.send(dist_table)
                    p = self.channels["init_prob"].recv()
                    # Already-chosen samples get probability zero.
                    p[center_ids] = 0
                    p = np.array(p / p.sum())
                    center_ids.append(np.random.choice(m, p=p))
                for party_id in FedConfig.get_trainer():
                    self.channels["init_center"][party_id].send(center_ids)
            return center_ids
| 2,731 | 39.776119 | 107 | py |
XFL | XFL-master/python/algorithm/framework/vertical/kmeans/assist_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict
import numpy as np
from common.communication.gRPC.python.channel import DualChannel
from common.evaluation.metrics import ClusteringMetric
from common.utils.logger import logger
from service.fed_config import FedConfig
from service.fed_node import FedNode
from service.fed_control import ProgressCalculator
from .api import get_table_agg_scheduler_inst
from .base import VerticalKmeansBase
class VerticalKmeansAssistTrainer(VerticalKmeansBase):
    """Coordinator for vertical federated K-means.

    Holds no feature data itself; it aggregates the parties' distance tables,
    assigns cluster labels, checks convergence, and computes the DBI metric.
    """
    def __init__(self, train_conf: dict, *args, **kwargs):
        """
        Args:
            train_conf:
            *args:
            **kwargs:
        """
        self.channels = {}
        self.party_id_list = FedConfig.get_label_trainer() + FedConfig.get_trainer()
        # One sync channel per data party, used to unify the train config.
        sync_chann: Dict[str, DualChannel] = {}
        for party_id in self.party_id_list:
            sync_chann[party_id] = DualChannel(
                name="sync_" + party_id, ids=[FedNode.node_id, party_id])
        self.channels["sync"] = sync_chann
        conf = self._sync_config()
        train_conf.update(conf)
        super().__init__(train_conf, label=True, *args, **kwargs)
        self.progress_calculator = ProgressCalculator(self.max_iter)
        self.DBI = None
        self.dist_sum = None
        # Per-party channels for cluster labels, tolerance values,
        # convergence flags, and data-shape checks.
        cluster_res_chan: Dict[str, DualChannel] = {}
        tolerance_chan: Dict[str, DualChannel] = {}
        converged_flag_chan: Dict[str, DualChannel] = {}
        check_data_com: Dict[str, DualChannel] = {}
        for party_id in self.party_id_list:
            cluster_res_chan[party_id] = DualChannel(
                name="cluster_res_" + party_id, ids=[FedNode.node_id, party_id])
            tolerance_chan[party_id] = DualChannel(
                name="tolerance_" + party_id, ids=[FedNode.node_id, party_id])
            converged_flag_chan[party_id] = DualChannel(
                name="converged_flag_" + party_id, ids=[FedNode.node_id, party_id]
            )
            check_data_com[party_id] = DualChannel(
                name="check_data_com_" + party_id, ids=[FedNode.node_id, party_id]
            )
        self.channels["cluster_result"] = cluster_res_chan
        self.channels["tolerance"] = tolerance_chan
        self.channels["converged_flag"] = converged_flag_chan
        self.channels["check_data_com"] = check_data_com
        self.dist_table_agg_executor = get_table_agg_scheduler_inst(
            sec_conf=self.encryption, trainer_ids=FedConfig.get_label_trainer() +
            FedConfig.get_trainer()
        )
    def _sync_config(self):
        """Collect each party's config and rebroadcast the one with the most
        keys (treated as the most complete), so all parties agree."""
        def count_key(conf):
            # Recursively count keys in a nested dict.
            if isinstance(conf, dict):
                num = len(conf.keys())
                for k, v in conf.items():
                    num += count_key(v)
                return num
            else:
                return 0
        conf_to_update = {}
        max_key_num = 0
        for party_id in self.party_id_list:
            conf = self.channels["sync"][party_id].recv()
            num = count_key(conf)
            if num >= max_key_num:
                conf_to_update = conf
                max_key_num = num
        for party_id in self.party_id_list:
            self.channels["sync"][party_id].send(conf_to_update)
        return conf_to_update
    def init_centers(self):
        # Only kmeans++ needs the coordinator: for each of the remaining k-1
        # centers, aggregate the parties' distance tables and send the label
        # trainer the sampling weights for the next center.
        if self.init == "kmeans++":
            self.channels["init_prob"] = DualChannel(name="init_prob",
                                                     ids=[FedNode.node_id] + FedConfig.get_label_trainer())
            for i in range(1, self.k):
                dist_sum = self.dist_table_agg_executor.aggregate()
                p = (dist_sum ** 0.5).sum(axis=1)
                self.channels["init_prob"].send(p)
    @staticmethod
    def get_cluster(dist_sum):
        """
        Assign each sample to its nearest centroid.
        Args:
            dist_sum: (n, k) aggregated distance table
        Returns:
            array of cluster labels, one per sample
        """
        labels = np.argmin(dist_sum, axis=1)
        return np.array(labels)
    def check_data(self):
        """Verify all parties report the same sample count and at least one feature."""
        lens, n_features = None, 0
        for party_id in self.party_id_list:
            recv = self.channels["check_data_com"][party_id].recv()
            m, n = recv
            if lens is None:
                lens = m
            elif lens != m:
                raise ValueError("Lengths of the train set mismatched.")
            n_features += n
        if n_features <= 0:
            raise ValueError("Number of the feature is zero. Stop training.")
    def fit(self):
        """Run the coordinator side of the K-means training loop."""
        logger.info("vertical K-means scheduler training start.")
        self.check_data()
        self.init_centers()
        logger.info("{}::initialized centers.".format(self.identity))
        for iter_ in range(self.max_iter):
            # After the first iteration, dist_sum was already aggregated at
            # the end of the previous loop body.
            if iter_ <= 0:
                self.dist_sum = self.dist_table_agg_executor.aggregate()
            cluster_result = self.get_cluster(self.dist_sum)
            for party_id in self.party_id_list:
                self.channels["cluster_result"][party_id].send(cluster_result)
            self.cluster_count_list = self.calc_cluster_count(cluster_result)
            # Convergence: total center movement summed over all parties.
            tol_list = []
            for party_id in self.party_id_list:
                tol_list.append(self.channels["tolerance"][party_id].recv())
            tol_sum = sum(tol_list)
            logger.info("iter: {}, tol: {}.".format(iter_, tol_sum))
            self.is_converged = True if tol_sum < self.tol else False
            for party_id in self.party_id_list:
                self.channels["converged_flag"][party_id].send(
                    self.is_converged)
            self.dist_sum = self.dist_table_agg_executor.aggregate()
            self._calc_metrics(self.dist_sum, cluster_result, iter_)
            # calculate and update the progress of the training
            self.progress_calculator.cal_custom_progress(iter_ + 1)
            if self.is_converged:
                if iter_ + 1 < self.max_iter:
                    # update the progress of 100 to show the training is finished
                    ProgressCalculator.finish_progress()
                break
    def _calc_metrics(self, dist_sum, cluster_result, epoch):
        # Currently only the Davies-Bouldin index is computed.
        self.calc_dbi(dist_sum, cluster_result, epoch)
    def calc_dbi(self, dist_sum, cluster_result, epoch):
        """Compute and log the Davies-Bouldin index for this epoch."""
        dist_table = self.calc_ave_dist(dist_sum, cluster_result)
        if len(dist_table) == 1:
            raise ValueError(
                "DBI calculation error: All data are clustered into one group.")
        # Parties send inter-center distances right after the dist tables.
        center_dist = self.dist_table_agg_executor.aggregate()
        cluster_avg_intra_dist = []
        for i in range(len(dist_table)):
            cluster_avg_intra_dist.append(dist_table[i][2])
        self.DBI = ClusteringMetric.calc_dbi(
            cluster_avg_intra_dist, center_dist)
        logger.info("epoch %d: dbi score=%.6g" % (epoch, self.DBI))
    def calc_ave_dist(self, dist_sum, cluster_result):
        """Average distance of each cluster's members to their own centroid.

        Returns a list of [cluster_id, member_count, avg_distance] rows
        (avg_distance is NaN for empty clusters).
        """
        distances_centers = dict()
        for vec, label in zip(dist_sum, cluster_result):
            if label not in distances_centers:
                distances_centers[label] = np.sqrt(vec[label])
            else:
                distances_centers[label] += np.sqrt(vec[label])
        calc_ave_dist_list = []
        for label in range(len(self.cluster_count_list)):
            count = self.cluster_count_list[label][1]
            if label not in distances_centers:
                calc_ave_dist_list.append([label, count, np.nan])
            else:
                calc_ave_dist_list.append(
                    [label, count, distances_centers[label] / count])
        return calc_ave_dist_list
| 8,199 | 36.788018 | 107 | py |
XFL | XFL-master/python/algorithm/framework/vertical/kmeans/api.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Union
from .table_agg_otp import TableAggregatorOTPAssistTrainer, TableAggregatorOTPTrainer
from .table_agg_plain import TableAggregatorPlainAssistTrainer, TableAggregatorPlainTrainer
def _get_table_agg_inst(role: str, sec_conf: dict, *args, **kwargs) -> Union[TableAggregatorPlainTrainer,
                                                                             TableAggregatorPlainAssistTrainer]:
    """Create a TableAggregator instance for the given role.

    Args:
        role: "trainer" or "scheduler".
        sec_conf: security config keyed by method name ("plain" or "otp");
            a None/empty config falls back to plaintext aggregation.

    Raises:
        KeyError: when the (method, role) combination is not supported.
    """
    if not sec_conf:  # covers both None and {}
        method = "plain"
        sec_conf = {
            "plain": {}
        }
    else:
        # The config is keyed by a single method name; take its first key.
        method = next(iter(sec_conf))
    opt = {
        "otp": {
            "trainer": TableAggregatorOTPTrainer,
            "scheduler": TableAggregatorOTPAssistTrainer
        },
        "plain": {
            "trainer": TableAggregatorPlainTrainer,
            "scheduler": TableAggregatorPlainAssistTrainer
        }
    }
    try:
        return opt[method][role](sec_conf[method], *args, **kwargs)
    except KeyError as e:
        raise KeyError("Combination of method {} and role {} is not supported "
                       "for creating TableAggregator instance".format(method, role)) from e
def get_table_agg_scheduler_inst(sec_conf: dict, *args, **kwargs) -> Union[TableAggregatorPlainAssistTrainer,
                                                                           TableAggregatorOTPAssistTrainer]:
    # Aggregator used on the assist-trainer (scheduler) side.
    return _get_table_agg_inst('scheduler', sec_conf, *args, **kwargs)
def get_table_agg_trainer_inst(sec_conf: dict, *args, **kwargs) -> Union[TableAggregatorPlainTrainer,
                                                                         TableAggregatorOTPTrainer]:
    # Aggregator used on the data-party (trainer) side.
    return _get_table_agg_inst('trainer', sec_conf, *args, **kwargs)
| 2,500 | 38.698413 | 112 | py |
XFL | XFL-master/python/algorithm/framework/vertical/kmeans/table_agg_base.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import pandas as pd
from common.communication.gRPC.python.channel import BroadcastChannel
from common.communication.gRPC.python.commu import Commu
from service.fed_config import FedConfig
class TableAggregatorAbstractTrainer(abc.ABC):
    """Abstract trainer-side table aggregator.

    NOTE: the original declared ``__metaclass__ = abc.ABCMeta``, which is the
    Python 2 metaclass syntax and has no effect under Python 3; inheriting
    from ``abc.ABC`` restores the intended ABC behavior without changing the
    class's interface or its subclasses.
    """
    def __init__(self, sec_conf: dict = None, *args, **kwargs) -> None:
        # Security config is stored verbatim; concrete subclasses interpret it.
        self.sec_conf = sec_conf
        # All parties share one broadcast channel rooted at the assist trainer.
        self.broadcast_chan = BroadcastChannel(name='table',
                                               ids=Commu.trainer_ids,
                                               root_id=FedConfig.get_assist_trainer(),
                                               auto_offset=True)
    def send(self, table: pd.Series) -> None:
        """Send the local table to the assist trainer; subclasses override."""
        pass
class TableAggregatorAbstractAssistTrainer(abc.ABC):
    """Abstract assist-trainer-side table aggregator.

    NOTE: the original declared ``__metaclass__ = abc.ABCMeta``, which is the
    Python 2 metaclass syntax and has no effect under Python 3; inheriting
    from ``abc.ABC`` restores the intended ABC behavior without changing the
    class's interface or its subclasses.
    """
    def __init__(self, sec_conf: dict = None, *args, **kwargs) -> None:
        # Security config is stored verbatim; concrete subclasses interpret it.
        self.sec_conf = sec_conf
        # Mirrors the trainer-side channel so collect() sees every party.
        self.broadcast_chan = BroadcastChannel(name='table',
                                               ids=Commu.trainer_ids,
                                               root_id=FedConfig.get_assist_trainer(),
                                               auto_offset=True)
    def aggregate(self) -> pd.Series:
        """Combine the tables received from all parties; subclasses override."""
        pass
| 1,867 | 35.627451 | 86 | py |
XFL | XFL-master/python/algorithm/framework/vertical/kmeans/__init__.py | 0 | 0 | 0 | py | |
XFL | XFL-master/python/algorithm/framework/vertical/kmeans/trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from pathlib import Path
import numpy as np
import pandas as pd
from common.checker.matcher import get_matched_config
from common.checker.x_types import All
from common.communication.gRPC.python.channel import DualChannel
from common.utils.logger import logger
from common.utils.utils import update_dict
from common.utils.model_io import ModelIO
from service.fed_config import FedConfig
from service.fed_node import FedNode
from .api import get_table_agg_trainer_inst
from .base import VerticalKmeansBase
class VerticalKmeansTrainer(VerticalKmeansBase):
    """Data-party trainer for vertical federated K-means.

    Computes local distance tables over its feature slice, sends them to the
    assist trainer through the table aggregator, updates its slice of the
    cluster centers from the received labels, and saves the final model.
    """
    def __init__(self, train_conf: dict, *args, **kwargs):
        """
        Args:
            train_conf:
            *args:
            **kwargs:
        """
        self.channels = {}
        self.channels["sync"] = DualChannel(
            name="sync_" + FedNode.node_id, ids=[FedConfig.get_assist_trainer(), FedNode.node_id]
        )
        # Adopt the unified config chosen by the assist trainer before init.
        conf = self._sync_config(train_conf)
        update_dict(train_conf, conf)
        super().__init__(train_conf, label=False, *args, **kwargs)
        self.dist_table = None
        self.cluster_result = None
        self.local_tol = 0.0
        # Point-to-point channels with the assist trainer for labels,
        # tolerance, convergence flags, and data-shape checks.
        self.channels["cluster_result"] = DualChannel(
            name="cluster_res_" + FedNode.node_id, ids=[FedConfig.get_assist_trainer(), FedNode.node_id]
        )
        self.channels["tolerance"] = DualChannel(
            name="tolerance_" + FedNode.node_id, ids=[FedConfig.get_assist_trainer(), FedNode.node_id]
        )
        self.channels["converged_flag"] = DualChannel(
            name="converged_flag_" + FedNode.node_id, ids=[FedConfig.get_assist_trainer(), FedNode.node_id]
        )
        self.channels["check_data_com"] = DualChannel(
            name="check_data_com_" + FedNode.node_id, ids=[FedConfig.get_assist_trainer(), FedNode.node_id]
        )
        self.table_agg_executor = get_table_agg_trainer_inst(
            sec_conf=self.encryption, trainer_ids=FedConfig.get_label_trainer() +
            FedConfig.get_trainer()
        )
    def _sync_config(self, config):
        """Send the local train_info to the assist trainer and receive the
        unified config back."""
        sync_rule = {
            "train_info": All()
        }
        config_to_sync = get_matched_config(config, sync_rule)
        self.channels["sync"].send(config_to_sync)
        conf = self.channels["sync"].recv()
        return conf
    def init_centers(self):
        """
        Initialize cluster centers by receiving the chosen row ids from the
        label trainer (contributing local distance tables for kmeans++).
        Returns:
        """
        self.channels["init_center"] = DualChannel(
            name="init_center_" + FedNode.node_id, ids=[FedNode.node_id] + FedConfig.get_label_trainer()
        )
        if self.init == "random":
            center_ids = self.channels["init_center"].recv()
            return center_ids
        elif self.init == "kmeans++":
            center_ids = []
            while len(center_ids) < self.k:
                if len(center_ids) >= 1:
                    # Contribute local distances so the coordinator can weight
                    # the sampling of the next center.
                    dist_table = self.distance_table(self.train_features.iloc[center_ids])
                    self.table_agg_executor.send(dist_table)
                center_ids = self.channels["init_center"].recv()
            return center_ids
    def check_data(self):
        # Report (sample count, feature count) for cross-party validation.
        m, n = len(self.train_ids), len(self.train_features.columns)
        self.channels["check_data_com"].send((m, n))
    def fit(self):
        """Run the data-party side of the K-means training loop."""
        logger.info("vertical K-means trainer start.")
        self.check_data()
        np.random.seed(self.random_seed)
        center_ids = self.init_centers()
        logger.info("{}::initialized centers.".format(self.identity))
        self.cluster_centers = self.train_features.iloc[center_ids]
        iter_ = 0
        for iter_ in range(self.max_iter):
            # After iteration 0, the table was already sent at the end of the
            # previous loop body.
            if iter_ <= 0:
                self.dist_table = self.distance_table(self.cluster_centers)
                self.table_agg_executor.send(self.dist_table)
            self.cluster_result = self.channels["cluster_result"].recv()
            centers = self.calc_centers(
                self.cluster_centers, self.cluster_result)
            # Report how far the local centers moved this round.
            self.local_tol = self.calc_tolerance(self.cluster_centers, centers)
            self.channels["tolerance"].send(self.local_tol)
            self.is_converged = self.channels["converged_flag"].recv()
            self.cluster_centers = centers
            # Send updated distance tables (used for metrics / next round).
            self.dist_table = self.distance_table(self.cluster_centers)
            self.table_agg_executor.send(self.dist_table)
            center_dist = self.distance_between_centers(self.cluster_centers)
            self.table_agg_executor.send(center_dist)
            if self.is_converged:
                break
        self.save(epoch=iter_, final=True)
    def save(self, epoch: int = None, final: bool = False):
        """
        Save the model json, the per-sample cluster assignment csv, and a
        per-cluster size summary csv.
        Args:
            epoch: last completed iteration (stored in the model json).
            final: unused flag kept for interface compatibility.
        Returns:
        """
        save_dir = str(Path(self.output.get("path")))
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        model_name = self.output.get("model", {}).get("name", "")
        cluster_centers = [list(i) for i in self.cluster_centers]
        kmeans_output = {
            "k": self.k,
            "iter": epoch,
            "is_converged": self.is_converged,
            "tol": self.tol,
            "cluster_centers": cluster_centers,
        }
        ModelIO.save_json_model(
            model_dict=kmeans_output,
            save_dir=save_dir,
            model_name=model_name,
        )
        result_dataframe = pd.DataFrame(
            {
                "id": self.train_ids.to_numpy(),
                "cluster_result": self.cluster_result
            }
        )
        result_name = self.output.get("result", {}).get("name", "")
        result_path = Path(save_dir, result_name)
        result_dataframe.to_csv(result_path, header=True, index=False)
        logger.info("result saved as: {}.".format(result_path))
        summary_df = result_dataframe.groupby(
            "cluster_result").size().to_frame("count")
        summary_df = summary_df.reset_index()
        summary_name = self.output.get("summary", {}).get("name", "")
        summary_path = Path(save_dir, summary_name)
        summary_df.to_csv(summary_path, header=True, index=False)
        logger.info("summary info saved to: {}.".format(summary_path))
| 5,876 | 30.260638 | 98 | py |
XFL | XFL-master/python/algorithm/framework/vertical/xgboost/base.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Any
import numpy as np
import pandas as pd
from algorithm.core.data_io import CsvReader, NdarrayIterator
from algorithm.core.encryption_param import get_encryption_param
from algorithm.core.tree.cat_param_parser import parse_category_param
from algorithm.core.tree.tree_param import XGBTreeParam
from algorithm.framework.vertical.vertical_model_base import VerticalModelBase
from common.utils.logger import logger
class VerticalXgboostBase(VerticalModelBase):
    """Common base class for vertical XGBoost trainers.

    Parses the xgboost training config, loads the train/val/test datasets,
    and converts the training features into a binned (histogram) representation
    used for split finding.
    """

    def __init__(self, train_conf: dict, is_label_trainer: bool = False, *args, **kwargs):
        """
        Args:
            train_conf: Full training configuration dict.
            is_label_trainer: True when this party holds the labels.
        """
        super().__init__(train_conf)
        self.train_conf = train_conf
        self.train_features, self.train_label, self.train_ids = None, None, None
        self.val_features, self.val_label, self.val_ids = None, None, None
        self.test_features, self.test_label, self.test_ids = None, None, None
        self.xgb_config = None
        self.is_label_trainer = is_label_trainer
        self.feature_importances_ = {}
        # Order matters: the config supplies missing_value used while loading data,
        # and binning requires the loaded training features.
        self.__init_xgb_config()
        self.__init_data()
        self.__convert_to_binned_data()

    def __init_data(self) -> None:
        """Load train/val/test features and labels and wrap them in batch iterators.

        Returns: None
        """
        self.bs = self.train_params.get("batch_size_val")
        if self.input_trainset:
            _ = self.__load_data(self.input_trainset)
            self.train_features, self.train_label, self.train_ids, self.train_names = _
            self.train_dataset = NdarrayIterator(self.train_features.to_numpy(), self.bs)
        else:
            self.train_dataset = None
        if self.input_valset:
            _ = self.__load_data(self.input_valset)
            self.val_features, self.val_label, self.val_ids, self.val_names = _
            self.val_dataset = NdarrayIterator(self.val_features.to_numpy(), self.bs)
        else:
            self.val_dataset = None
        if self.input_testset:
            _ = self.__load_data(self.input_testset)
            self.test_features, self.test_label, self.test_ids, self.test_names = _
            self.test_dataset = NdarrayIterator(self.test_features.to_numpy(), self.bs)
        else:
            self.test_dataset = None

    def __convert_to_binned_data(self):
        ''' Note self.train_features will be converted to binned feature '''
        cat_columns = parse_category_param(self.train_features,
                                           col_index=self.xgb_config.cat_col_index,
                                           col_names=self.xgb_config.cat_col_names,
                                           col_index_type=self.xgb_config.cat_col_index_type,
                                           col_names_type=self.xgb_config.cat_col_names_type,
                                           max_num_value=self.xgb_config.cat_max_num_value,
                                           max_num_value_type=self.xgb_config.cat_max_num_value_type)
        self.cat_columns = cat_columns
        self.cat_feature_names = []
        if len(cat_columns) > 0:
            self.cat_feature_names = self.train_features.columns[cat_columns].to_list()
            self.train_features[self.cat_feature_names] = self.train_features[self.cat_feature_names].astype('category')

        def f(x):
            # Bin a single column: factorize categorical columns (grouping the
            # rarest values into one bin when exceeding num_bins), equal-width
            # cut for continuous columns.
            if self.train_features[x].dtypes == "category":
                value_counts = self.train_features[x].value_counts()  # descending order
                if value_counts.shape[0] > self.xgb_config.num_bins:
                    values = value_counts.index.to_list()
                    list_unique = values[:self.xgb_config.num_bins - 1]
                    list_group = values[self.xgb_config.num_bins - 1:]
                    uniques = np.array(list_unique + [list_group], dtype=object)
                    value_map = {v: i for i, v in enumerate(list_unique)}
                    value_map.update({v: len(list_unique) for v in list_group})
                    codes = self.train_features[x].map(value_map)
                else:
                    codes, uniques = pd.factorize(self.train_features[x], na_sentinel=0)  # na_sentinel will not be activated actually
                    uniques = uniques.to_numpy()
                # uniques: array of values that belongs to the same category
                # codes: binned values
                return pd.Series(codes, name=x), uniques.tolist()
            else:
                binned_values, split_points = pd.cut(self.train_features[x], bins=self.xgb_config.num_bins, retbins=True, labels=range(self.xgb_config.num_bins))
                return binned_values, split_points

        if self.input_trainset:
            out = pd.Series(self.train_features.columns).apply(f)
            # Choose the smallest unsigned dtype able to hold the bin indices.
            if self.xgb_config.num_bins <= 256:
                dtype = np.uint8
            elif self.xgb_config.num_bins <= 2 ** 16:
                dtype = np.uint16
            else:
                dtype = np.uint32
            self.train_features = pd.DataFrame([out[i][0] for i in range(len(out))], dtype=dtype).T
            # For continuous features, self.split_points stores the split points between bins, for example, 15 split points for 16 bins.
            # For categorial features, self.split_points stores original values correspond to the bin values, for example, 16 values for 16 bins.
            self.split_points = [out[i][1][1:-1] if i not in self.cat_columns else out[i][1][:] for i in range(len(out))]

    def __load_data(self, config):
        """ Load data from dataset config.

        Args:
            config: Dataset config (list; only the first entry is used).

        Returns: tuple of (features, labels, ids, names); every element is None
            when the config is empty.
        """
        if len(config) > 1:
            logger.warning("More than one dataset is not supported.")
        if not config:
            return None, None, None, None
        config = config[0]
        if config["type"] == "csv":
            path = os.path.join(config["path"], config["name"])
            if not path:
                # Bug fix: previously returned a 3-tuple here while every caller
                # unpacks 4 values (features, labels, ids, names).
                return None, None, None, None
            data_reader = CsvReader(path, has_id=config["has_id"], has_label=config["has_label"])
            features = data_reader.features(type="pandas.dataframe")
            # Missing values (NaN or the configured sentinel) are zero-filled.
            features.replace({np.nan: 0, self.xgb_config.missing_value: 0}, inplace=True)
            ids = data_reader.ids
            names = data_reader.feature_names()
            if self.is_label_trainer:
                labels = data_reader.label()
            else:
                labels = None
        else:
            raise NotImplementedError("Dataset type {} is not supported.".format(config["type"]))
        return features, labels, ids, names

    def col_sample(self) -> tuple[Any, dict]:
        """Randomly sample feature columns according to subsample_feature_rate.

        Returns: (sampled feature dataframe, mapping from sampled position to
            original column index).
        """
        col_size = self.train_features.shape[1]
        if 0 < self.xgb_config.subsample_feature_rate <= 1:
            sample_num = int(col_size * self.xgb_config.subsample_feature_rate)
        else:
            sample_num = col_size
        sampled_idx = np.sort(np.random.choice(col_size, sample_num, replace=False))
        feature_id_mapping = {a: b for a, b in enumerate(sampled_idx)}
        sampled_features = self.train_features.iloc[:, sampled_idx]
        return sampled_features, feature_id_mapping

    def __init_xgb_config(self) -> None:
        """ Build the XGBTreeParam from the train_params section of the config.

        Returns: None
        """
        default_config = self.train_info.get("train_params")
        cat_params = default_config.get("category", {}).get("cat_features", {})
        encryption_methods = list(default_config.get("encryption", {}).keys())
        if len(encryption_methods) > 0:
            encryption_method = encryption_methods[0]
        else:
            encryption_method = "plain"
        encryption_params = default_config.get("encryption", {"plain": {}})[encryption_method]
        downsampling_params = default_config.get("downsampling", {})
        self.xgb_config = XGBTreeParam(loss_param=default_config.get("lossfunc"),  # ("BCEWithLogitsLoss"),
                                       num_trees=default_config.get("num_trees"),
                                       learning_rate=default_config.get("learning_rate"),
                                       gamma=default_config.get("gamma"),
                                       lambda_=default_config.get("lambda_"),
                                       max_depth=default_config.get("max_depth"),
                                       num_bins=default_config.get("num_bins", 16),
                                       min_split_gain=default_config.get("min_split_gain"),
                                       min_sample_split=default_config.get("min_sample_split"),
                                       min_leaf_node=default_config.get("min_leaf_node"),
                                       feature_importance_type=default_config.get("feature_importance_type"),
                                       run_goss=downsampling_params.get("row", {}).get("run_goss", False),
                                       top_rate=downsampling_params.get("row", {}).get("top_rate"),
                                       other_rate=downsampling_params.get("row", {}).get("other_rate"),
                                       metrics=default_config.get("metric"),
                                       early_stopping_param=default_config.get("early_stopping",
                                                                               {"patience": -1,
                                                                                "key": "ks",
                                                                                "delta": 0.001}),
                                       encryption_param=get_encryption_param(encryption_method, encryption_params),
                                       subsample_feature_rate=downsampling_params.get("column", {}).get("rate", 1.0),
                                       missing_value=float('inf'),
                                       max_num_cores=default_config.get("max_num_cores", 999),
                                       col_batch=default_config.get("advanced", {}).get("col_batch", 64),
                                       row_batch=default_config.get("advanced", {}).get("row_batch", 40000),
                                       cat_col_index=cat_params.get("col_index", ""),
                                       cat_col_names=cat_params.get("col_names", []),
                                       cat_max_num_value=cat_params.get("max_num_value", 0),
                                       cat_col_index_type=cat_params.get("col_index_type", "inclusive"),
                                       cat_col_names_type=cat_params.get("col_names_type", "inclusive"),
                                       cat_max_num_value_type=cat_params.get("max_num_value_type", "union"),
                                       cat_smooth=default_config.get("category", {}).get("cat_smooth", 1.0))
| 11,631 | 51.633484 | 161 | py |
XFL | XFL-master/python/algorithm/framework/vertical/xgboost/decision_tree_label_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
from typing import Dict, List, Optional, Union
import numpy as np
import pandas as pd
from pathos.pools import ThreadPool
from algorithm.core.encryption_param import PaillierParam, PlainParam
from algorithm.core.paillier_acceleration import embed, umbed
from algorithm.core.tree.big_feature import Feature
from algorithm.core.tree.feature_importance import FeatureImportance
from algorithm.core.tree.gain_calc import BestSplitInfo, cal_cat_rank, cal_gain, cal_weight
from algorithm.core.tree.goss import Goss
from algorithm.core.tree.tree_param import XGBTreeParam
from algorithm.core.tree.tree_structure import Node, SplitInfo, Tree
from algorithm.core.tree.xgboost_loss import get_xgb_loss_inst
from common.communication.gRPC.python.channel import BroadcastChannel, DualChannel
from common.crypto.paillier.paillier import Paillier, PaillierContext
from common.crypto.paillier.utils import get_core_num
from common.utils.constants import PAILLIER, PLAIN
from common.utils.logger import logger
from service.fed_config import FedConfig
from service.fed_control import ProgressCalculator
from .debug_params import EMBEDING
from service.fed_node import FedNode
class VerticalDecisionTreeLabelTrainer(object):
    """Label-trainer side of training one vertical XGBoost decision tree.

    Computes gradients/hessians on the labels, broadcasts them (optionally
    Paillier-encrypted) to the feature-holding trainers, then grows the tree
    level by level by combining local and remote best-split candidates.
    NOTE(review): the send/recv ordering on the channels must mirror the
    non-label trainer side exactly — do not reorder communication statements.
    """
    def __init__(self,
                 tree_param: XGBTreeParam,
                 y: np.ndarray,
                 y_pred: np.ndarray,
                 features: pd.DataFrame,
                 cat_columns: list,
                 split_points: np.ndarray,
                 channels: Dict[str, Union[BroadcastChannel, DualChannel]],
                 encryption_context: Optional[PaillierContext] = None,
                 feature_id_mapping: Optional[Dict[int, int]] = None,
                 tree_index: Optional[int] = None):
        """
        Args:
            tree_param: Tree hyper-parameters (depth, regularization, encryption, ...).
            y: Ground-truth labels.
            y_pred: Current ensemble prediction (after link function).
            features: Binned local features (column-sampled for this tree).
            cat_columns: Indices of categorical columns within `features`.
            split_points: Per-column bin boundaries / category values.
            channels: Named communication channels shared with the other parties.
            encryption_context: Private Paillier context when encryption is on.
            feature_id_mapping: Sampled column position -> original column index.
            tree_index: Index of this tree within the boosting sequence.
        """
        logger.info(
            f"Label trainer decision tree {tree_index} initialize start.")
        if tree_param.encryption_param.method not in [PAILLIER, PLAIN]:
            raise ValueError(
                f"Encryption method {tree_param.encryption_param.method} not supported.")
        self.tree_param = tree_param
        self.progress_calculator = ProgressCalculator(self.tree_param.num_trees, self.tree_param.max_depth)
        self.y = y
        self.y_pred = y_pred
        self.cat_columns = cat_columns
        self.split_points = split_points
        self.party_id = FedConfig.node_id
        self.max_num_cores = get_core_num(tree_param.max_num_cores)
        self.tree_index = tree_index
        loss_inst = get_xgb_loss_inst(list(self.tree_param.loss_param.keys())[0])
        self.grad = loss_inst.cal_grad(
            self.y, self.y_pred, after_prediction=True)
        if tree_param.run_goss:
            # GOSS row sampling: keep top-gradient rows, sample the rest, and
            # rescale gradients/hessians so estimates stay unbiased.
            goss = Goss(tree_param.top_rate, tree_param.other_rate)
            self.goss_selected_idx = goss.sampling(self.grad)
            hess = loss_inst.cal_hess(
                self.y[self.goss_selected_idx], self.y_pred[self.goss_selected_idx], after_prediction=True)
            self.hess = np.zeros_like(self.grad)
            self.hess[self.goss_selected_idx] = hess
            goss.update_gradients(self.grad, self.hess)
        else:
            self.hess = loss_inst.cal_hess(self.y, self.y_pred, after_prediction=True)
            self.goss_selected_idx = range(self.y.shape[0])
        sample_index = self.goss_selected_idx
        self.individual_grad_hess: BroadcastChannel = channels["individual_grad_hess"]
        self.tree_node_chann: BroadcastChannel = channels["tree_node"]
        self.summed_grad_hess_channs: Dict[str,
                                           DualChannel] = channels["summed_grad_hess"]
        self.min_split_info_channs: Dict[str,
                                         DualChannel] = channels["min_split_info"]
        self.sample_index_after_split_channs: Dict[str,
                                                   DualChannel] = channels["sample_index_after_split"]
        encryption_param = self.tree_param.encryption_param
        self.pri_context = encryption_context
        self.feature_importance = {}
        self.feature_importance_type = tree_param.feature_importance_type
        self.feature_id_mapping = feature_id_mapping
        # Share the per-sample grad/hess with the feature holders: plaintext,
        # or Paillier-encrypted (packed into one ciphertext when EMBEDING is on).
        if isinstance(encryption_param, PlainParam):
            self.individual_grad_hess.broadcast(
                [self.grad[sample_index], self.hess[sample_index]], use_pickle=True)
        elif isinstance(encryption_param, PaillierParam):
            num_cores = self.max_num_cores if encryption_param.parallelize_on else 1
            if EMBEDING:
                # grad and hess are embedded into a single big integer before
                # encryption; unpacked later by umbed() with the same interval.
                grad_hess = embed([self.grad[sample_index], self.hess[sample_index]], interval=(1 << 128), precision=64)
                enc_grad_hess = Paillier.encrypt(context=self.pri_context,
                                                 data=grad_hess,
                                                 precision=0,  # must be 0
                                                 obfuscation=True,
                                                 num_cores=num_cores)
                self.individual_grad_hess.broadcast(Paillier.serialize(enc_grad_hess, compression=False),
                                                    use_pickle=True)
            else:
                enc_grad = Paillier.encrypt(context=self.pri_context,
                                            data=self.grad[sample_index],
                                            precision=encryption_param.precision,
                                            obfuscation=True,
                                            num_cores=num_cores)
                enc_hess = Paillier.encrypt(context=self.pri_context,
                                            data=self.hess[sample_index],
                                            precision=encryption_param.precision,
                                            obfuscation=True,
                                            num_cores=num_cores)
                self.individual_grad_hess.broadcast(
                    [Paillier.serialize(enc_grad, compression=False), Paillier.serialize(enc_hess, compression=False)],
                    use_pickle=True)
        else:
            raise ValueError("Encryption param not supported.")
        if features.shape[1] == 0:
            # This party may hold no features after column sampling.
            self.big_feature = None
        else:
            self.big_feature = Feature.create(values=features.iloc[sample_index, :],
                                              sample_index=sample_index,
                                              grad=self.grad[sample_index],
                                              hess=self.hess[sample_index])
        logger.info(
            f"Label trainer decision tree {tree_index} initialize finished.")
    def _cal_local_best_split(self, node: Node):
        """Find the best split over this party's own features for `node`.

        Builds per-bin grad/hess histograms, computes cumulative gains, and
        returns a BestSplitInfo (gain stays -inf-like default if nothing beats it).
        """
        # NOTE(review): 'feature_ower' matches the BestSplitInfo parameter
        # spelling defined elsewhere in the project — confirm before renaming.
        best_split_info = BestSplitInfo(feature_ower=self.party_id)
        if node.sample_index is None or len(node.sample_index) == self.big_feature.data.shape[0]:
            big_feature = self.big_feature
        else:
            big_feature = self.big_feature.slice_by_sample_index(node.sample_index)
        res_hist_list = []
        for col_name in big_feature.feature_columns:
            res_hist_list.append(big_feature.data.groupby([col_name])[['xfl_grad', 'xfl_hess']].agg({'sum'}))  # ({'count', 'sum'})
        # for categorial features, resort
        # cat column is count from the first col of cat feature
        for feature_idx in self.cat_columns:
            cat_rank = cal_cat_rank(res_hist_list[feature_idx][('xfl_grad', 'sum')],
                                    res_hist_list[feature_idx][('xfl_hess', 'sum')],
                                    self.tree_param.cat_smooth)
            cat_rank.sort_values(inplace=True)
            # index is saved in the Series's index
            res_hist_list[feature_idx] = res_hist_list[feature_idx].loc[cat_rank.index.to_list()]
        for feature_idx in range(len(res_hist_list)):
            res_hist_list[feature_idx] = res_hist_list[feature_idx].cumsum(axis=0)
            res_hist_list[feature_idx].rename(columns={"sum": "cum_sum"}, inplace=True)
            cum_grad = res_hist_list[feature_idx][('xfl_grad', 'cum_sum')].to_numpy()
            cum_hess = res_hist_list[feature_idx][('xfl_hess', 'cum_sum')].to_numpy()
            gains = cal_gain(cum_grad, cum_hess, self.tree_param.lambda_)
            if len(gains) == 1 and gains[0] == -np.inf:
                continue
            max_gain_index = np.argmax(gains)
            max_gain = gains[max_gain_index].item()
            if max_gain > best_split_info.gain:
                best_split_info.gain = max_gain
                best_split_info.feature_owner = self.party_id
                best_split_info.feature_idx = self.feature_id_mapping[feature_idx].item()
                # For categorial feature, split_point stores categories in left child branch
                if feature_idx in self.cat_columns:
                    # It is not much precise if some categorial values are not be sampled
                    left_cat = res_hist_list[feature_idx].index.to_list()[:max_gain_index + 1]
                    best_split_info.left_cat = []
                    for cat in left_cat:
                        ori_cat = self.split_points[feature_idx][cat]
                        if isinstance(ori_cat, list):
                            best_split_info.left_cat += ori_cat
                        else:
                            best_split_info.left_cat.append(ori_cat)
                    best_split_info.split_point = None
                    best_split_info.is_category = True
                    # +3 skips the bookkeeping columns (id/grad/hess) in big_feature.data.
                    filter = big_feature.data.iloc[:, feature_idx + 3].isin(left_cat)
                else:
                    # Because of sampling
                    max_split_index = int(res_hist_list[feature_idx][('xfl_grad', 'cum_sum')].index[max_gain_index])
                    max_split_index = min(max_split_index, len(self.split_points[feature_idx]) - 1)
                    best_split_info.split_point = self.split_points[feature_idx][max_split_index]
                    best_split_info.left_cat = None
                    best_split_info.is_category = False
                    filter = big_feature.data.iloc[:, feature_idx + 3] <= max_split_index
                best_split_info.left_sample_index = big_feature.data[filter]['xfl_id'].tolist()
                best_split_info.right_sample_index = big_feature.data[~filter]['xfl_id'].tolist()
                left_weight = cal_weight(cum_grad[max_gain_index],
                                         cum_hess[max_gain_index],
                                         self.tree_param.lambda_).item()
                right_weight = cal_weight(cum_grad[-1] - cum_grad[max_gain_index],
                                          cum_hess[-1] - cum_hess[max_gain_index],
                                          self.tree_param.lambda_).item()
                best_split_info.left_bin_weight = left_weight
                best_split_info.right_bin_weight = right_weight
                best_split_info.num_left_bin = len(best_split_info.left_sample_index)
                best_split_info.num_right_bin = len(best_split_info.right_sample_index)
                best_split_info.max_gain_index = max_gain_index  # only valid for continuous feature
        return best_split_info
    def _cal_remote_best_split(self) -> Dict[str, BestSplitInfo]:
        """Collect summed grad/hess histograms from each remote trainer,
        decrypt them if needed, and compute the best split per remote party.

        Polls each party's channel until it signals completion; returns a dict
        party_id -> BestSplitInfo.
        """
        best_split_info_dict: Dict[str, BestSplitInfo] = {
            party_id: BestSplitInfo(feature_ower=party_id) for party_id in self.summed_grad_hess_channs
        }
        gain_infos: Dict[str, list] = {
            party_id: [] for party_id in self.summed_grad_hess_channs
        }
        is_continue_flags = np.array([True for party_id in self.summed_grad_hess_channs], dtype=bool)
        def decrypt_hist(hist_list: List[np.ndarray], num_cores: int, out_origin: bool = True) -> list:
            # Decrypt all histograms in one batched call, then split the flat
            # result back into the original per-feature segments.
            len_list = [len(item) for item in hist_list]
            cum_len = np.cumsum([0] + len_list)
            hist = np.concatenate(hist_list)
            hist = Paillier.decrypt(self.pri_context, hist, num_cores=num_cores, out_origin=out_origin)
            res = []
            for i in range(len(cum_len) - 1):
                res.append(hist[cum_len[i]: cum_len[i + 1]])
            return res
        while True:
            for i, party_id in enumerate(self.summed_grad_hess_channs):
                if not is_continue_flags[i]:
                    continue
                data = self.summed_grad_hess_channs[party_id].recv(use_pickle=True, wait=False)
                if data is None:
                    # Data has not been send, try it next round.
                    continue
                is_continue, grad_hess_hist_list, remote_cat_index = data
                if self.tree_param.encryption_param.method == PAILLIER:
                    if EMBEDING:
                        # Each item packs (embedded grad+hess ciphertexts, plain counts).
                        grad_hess_hist = []
                        count_hist_list = []
                        for item in grad_hess_hist_list:
                            grad_hess_hist.append(item[0])
                            count_hist_list.append(item[1])
                        grad_hess_hist = decrypt_hist(grad_hess_hist, num_cores=self.max_num_cores, out_origin=True)
                        grad_hist_list = []
                        hess_hist_list = []
                        for hist in grad_hess_hist:
                            # NOTE(review): 'precison' matches umbed()'s parameter
                            # spelling; interval must equal the embed() interval.
                            a, b = umbed(hist, num=2, interval=(1 << 128), precison=64)
                            grad_hist_list.append(a)
                            hess_hist_list.append(b)
                    else:
                        grad_hist_list = []
                        hess_hist_list = []
                        count_hist_list = []
                        for item in grad_hess_hist_list:
                            grad_hist_list.append(item[0])
                            hess_hist_list.append(item[1])
                            count_hist_list.append(item[2])
                        grad_hist_list = decrypt_hist(grad_hist_list, num_cores=self.max_num_cores, out_origin=False)
                        hess_hist_list = decrypt_hist(hess_hist_list, num_cores=self.max_num_cores, out_origin=False)
                else:
                    # Plain encryption: histograms arrive unencrypted.
                    grad_hist_list = []
                    hess_hist_list = []
                    count_hist_list = []
                    for item in grad_hess_hist_list:
                        grad_hist_list.append(item[0])
                        hess_hist_list.append(item[1])
                        count_hist_list.append(item[2])
                for idx in range(len(grad_hess_hist_list)):
                    grad_hist, hess_hist, count_hist = \
                        np.array(grad_hist_list[idx], dtype=np.float32), np.array(hess_hist_list[idx], dtype=np.float32), np.array(count_hist_list[idx])
                    # for categorial feature, resort
                    if idx in remote_cat_index:
                        cat_rank = cal_cat_rank(grad_hist, hess_hist, self.tree_param.cat_smooth)
                        cat_rank = np.argsort(cat_rank).tolist()
                        grad_hist = grad_hist[cat_rank]
                        hess_hist = hess_hist[cat_rank]
                        count_hist = count_hist[cat_rank]
                    else:
                        cat_rank = []
                    cum_grad_hist = np.cumsum(grad_hist)
                    cum_hess_hist = np.cumsum(hess_hist)
                    gains = cal_gain(cum_grad_hist, cum_hess_hist, self.tree_param.lambda_)
                    max_gain_index = np.argmax(gains)
                    max_gain = gains[max_gain_index].item()
                    num_left_sample = np.sum(count_hist[:max_gain_index + 1])
                    num_right_sample = np.sum(count_hist[max_gain_index + 1:])
                    info = {
                        'max_gain': max_gain,
                        'cum_grad': cum_grad_hist,
                        'cum_hess': cum_hess_hist,
                        'max_gain_index': max_gain_index,
                        "is_category": idx in remote_cat_index,
                        'cat_rank': cat_rank,
                        'num_left_sample': num_left_sample,
                        'num_right_sample': num_right_sample
                    }
                    gain_infos[party_id].append(info)
                if not is_continue:
                    is_continue_flags[i] = is_continue
                    # No data will be send later, cal best_split_info
                    best_split_info: BestSplitInfo = best_split_info_dict[party_id]
                    for feature_idx, gain_info in enumerate(gain_infos[party_id]):
                        max_gain = gain_info["max_gain"]
                        cum_grad = gain_info["cum_grad"]
                        cum_hess = gain_info["cum_hess"]
                        max_gain_index = gain_info["max_gain_index"]
                        is_category = gain_info["is_category"]
                        cat_rank = gain_info["cat_rank"]
                        if max_gain > best_split_info.gain:
                            if len(cum_grad) == 1:
                                max_gain_split_index = 0
                            else:
                                max_gain_split_index = max_gain_index
                            if max_gain > best_split_info.gain:
                                best_split_info.gain = max_gain
                                best_split_info.feature_owner = party_id
                                best_split_info.feature_idx = feature_idx
                                best_split_info.split_point = None  # should not know
                                best_split_info.missing_value_on_left = None  # need not know
                                best_split_info.left_sample_index = None  # get it later
                                best_split_info.right_sample_index = None  # get it later
                                best_split_info.num_left_bin = gain_info["num_left_sample"]
                                best_split_info.num_right_bin = gain_info["num_right_sample"]
                                left_weight = cal_weight(cum_grad[max_gain_split_index],
                                                         cum_hess[max_gain_split_index],
                                                         self.tree_param.lambda_).item()
                                right_weight = cal_weight(cum_grad[-1] - cum_grad[max_gain_split_index],
                                                          cum_hess[-1] -
                                                          cum_hess[max_gain_split_index],
                                                          self.tree_param.lambda_).item()
                                best_split_info.left_bin_weight = left_weight
                                best_split_info.right_bin_weight = right_weight
                                best_split_info.max_gain_index = max_gain_index
                                # note this is not the final result of the left category
                                best_split_info.left_cat = [] if not cat_rank else cat_rank[:max_gain_index + 1]
                                best_split_info.is_category = is_category
            flag = np.any(is_continue_flags)
            if not flag:
                break
            gc.collect()
        return best_split_info_dict
    def get_feature_importance(self):
        """Return the accumulated (owner_name, feature_idx) -> FeatureImportance map."""
        return self.feature_importance
    def update_feature_importance(self, split_info: SplitInfo):
        """Record one split of `split_info`'s feature into the importance map,
        translating the owner node id to its configured display name."""
        inc_split, inc_gain = 1, split_info.gain
        owner_id = split_info.owner_id
        fid = split_info.feature_idx
        owner_name = owner_id
        for node_id in FedNode.config["trainer"]:
            if owner_id == node_id:
                owner_name = FedNode.config["trainer"][owner_id]["name"]
                break
        if (owner_name, fid) not in self.feature_importance:
            self.feature_importance[(owner_name, fid)] = FeatureImportance(
                0, 0, self.feature_importance_type)
        self.feature_importance[(owner_name, fid)].add_split(inc_split)
        if inc_gain is not None:
            self.feature_importance[(owner_name, fid)].add_gain(inc_gain)
    def fit(self) -> Tree:
        """Grow one decision tree level by level and return it.

        For every node: broadcast the node, compute local and remote best
        splits concurrently, pick the global best, notify the winning party
        (or -1 sentinels), and split the tree accordingly.
        """
        tree = Tree(self.party_id, self.tree_index)
        # Two workers: local split search and remote split collection overlap.
        thread_pool = ThreadPool(2)
        logger.info(f"Decision tree {self.tree_index} training start..")
        if self.tree_param.run_goss:
            tree.root_node.sample_index = self.goss_selected_idx
        for depth in range(self.tree_param.max_depth):
            logger.info(f"Decision tree depth {depth} training start..")
            this_depth_nodes = tree.search_nodes(depth)
            for node in this_depth_nodes:
                logger.info(f"Depth {depth} - node {node.id} start training.")
                self.tree_node_chann.broadcast(node, use_pickle=True)
                best_split_info_dict: Dict[str, BestSplitInfo] = {}
                logger.info("Calculating local best split..")
                if self.big_feature is not None:
                    res1 = thread_pool.apipe(self._cal_local_best_split, node)
                logger.info("Calculating remote best split..")
                res2 = thread_pool.apipe(self._cal_remote_best_split)
                if self.big_feature is not None:
                    best_split_info_dict[self.party_id] = res1.get()
                    logger.info("Calculating local best split done.")
                best_split_info_dict_remote = res2.get()
                logger.info("Calculating remote best split done.")
                best_split_info_dict.update(best_split_info_dict_remote)
                party_ids = list(best_split_info_dict.keys())
                best_split_party_id = party_ids[
                    np.argmax(
                        [best_split_info_dict[party_id].gain for party_id in party_ids])
                ]
                best_split_info = best_split_info_dict[best_split_party_id]
                # Stop splitting this node if the gain or child sizes are too small;
                # [-1, -1, -1] tells every party "no split here".
                if best_split_info.gain < self.tree_param.min_split_gain or \
                        min(best_split_info.num_left_bin,
                            best_split_info.num_right_bin) < self.tree_param.min_sample_split:
                    for party_id in self.min_split_info_channs:
                        self.min_split_info_channs[party_id].send([-1, -1, -1], use_pickle=True)
                    continue
                if best_split_info.feature_owner == self.party_id:
                    # Local feature wins: remote parties get the "no split" sentinel.
                    for party_id in self.min_split_info_channs:
                        self.min_split_info_channs[party_id].send([-1, -1, -1], use_pickle=True)
                    split_info = SplitInfo(owner_id=best_split_info.feature_owner,
                                           feature_idx=best_split_info.feature_idx,
                                           is_category=best_split_info.is_category,
                                           split_point=best_split_info.split_point,
                                           left_cat=best_split_info.left_cat,
                                           gain=best_split_info.gain)
                else:
                    # Remote feature wins: only the owner learns the split details
                    # and replies with the resulting left/right sample indices.
                    for party_id in self.min_split_info_channs:
                        if best_split_info.feature_owner == party_id:
                            self.min_split_info_channs[party_id].send(
                                [best_split_info.feature_idx, best_split_info.max_gain_index, best_split_info.left_cat],
                                use_pickle=True
                            )
                        else:
                            self.min_split_info_channs[party_id].send([-1, -1, -1], use_pickle=True)
                    split_info = SplitInfo(owner_id=best_split_info.feature_owner,
                                           feature_idx=best_split_info.feature_idx,
                                           gain=best_split_info.gain)
                    best_split_info.left_sample_index, best_split_info.right_sample_index = \
                        self.sample_index_after_split_channs[best_split_info.feature_owner].recv(
                            use_pickle=True)
                left_node_id, right_node_id = tree.split(node_id=node.id,
                                                         split_info=split_info,
                                                         left_sample_index=best_split_info.left_sample_index,
                                                         right_sample_index=best_split_info.right_sample_index,
                                                         left_sample_weight=best_split_info.left_bin_weight,
                                                         right_sample_weight=best_split_info.right_bin_weight)
                node.update_as_non_leaf(split_info=split_info,
                                        left_node_id=left_node_id,
                                        right_node_id=right_node_id)
                self.update_feature_importance(split_info)
                logger.info(f"Depth {depth} - node {node.id} finish training.")
                gc.collect()
            # calculate and update the progress of the training
            self.progress_calculator.cal_custom_progress(self.tree_index, depth+1)
        self.tree_node_chann.broadcast(None, use_pickle=True)
        logger.info(f"Decision tree {self.tree_index} training finished")
        return tree
| 26,246 | 50.974257 | 152 | py |
XFL | XFL-master/python/algorithm/framework/vertical/xgboost/label_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import os
from pathlib import Path
from typing import Dict, Optional
import numpy as np
import pandas as pd
from pathos.pools import ThreadPool
from algorithm.core.data_io import NdarrayIterator
from algorithm.core.encryption_param import PaillierParam, PlainParam
from algorithm.core.tree.tree_structure import BoostingTree, Tree
from algorithm.core.tree.xgboost_loss import get_xgb_loss_inst
from common.checker.matcher import get_matched_config
from common.checker.x_types import All
from common.communication.gRPC.python.channel import BroadcastChannel, DualChannel
from common.crypto.paillier.paillier import Paillier
from common.utils.algo_utils import earlyStopping
from common.utils.logger import logger
from common.utils.utils import save_model_config
from service.fed_config import FedConfig
from service.fed_node import FedNode
from .base import VerticalXgboostBase
from .decision_tree_label_trainer import VerticalDecisionTreeLabelTrainer
from service.fed_control import ProgressCalculator
from common.utils.model_io import ModelIO
class VerticalXgboostLabelTrainer(VerticalXgboostBase):
    def __init__(self, train_conf: dict, *args, **kwargs):
        """Set up channels, encryption, early stopping and export config for the
        label trainer.

        Args:
            train_conf: Full training configuration dict; the shared subset is
                broadcast to the other parties before the base class is built.
        """
        self.channels = dict()
        self.channels["sync"] = BroadcastChannel(name="sync")
        # Config must be synced before super().__init__ so all parties parse
        # consistent hyper-parameters.
        self._sync_config(train_conf)
        super().__init__(train_conf, is_label_trainer=True, *args, **kwargs)
        self.party_id = FedConfig.node_id
        self.channels["encryption_context"] = BroadcastChannel(
            name="encryption_context")
        self.channels["individual_grad_hess"] = BroadcastChannel(
            name="individual_grad_hess")
        self.channels["tree_node"] = BroadcastChannel(name="tree_node")
        self.channels["check_dataset_com"] = BroadcastChannel(
            name="check_dataset_com")
        # Point-to-point channels per non-label trainer.
        # NOTE(review): channel names/order must mirror the trainer side — confirm
        # before reordering any of these constructions.
        summed_grad_hess_channs: Dict[str, DualChannel] = {}
        min_split_info_channs: Dict[str, DualChannel] = {}
        sample_index_after_split_channs: Dict[str, DualChannel] = {}
        val_com: Dict[str, DualChannel] = {}
        restart_com: Dict[str, DualChannel] = {}
        early_stop_com: Dict[str, DualChannel] = {}
        for party_id in FedConfig.get_trainer():
            summed_grad_hess_channs[party_id] = \
                DualChannel(name="summed_grad_hess_" + party_id,
                            ids=[FedConfig.node_id, party_id])
            min_split_info_channs[party_id] = \
                DualChannel(name="min_split_info_" + party_id,
                            ids=[FedConfig.node_id, party_id])
            sample_index_after_split_channs[party_id] = \
                DualChannel(name="sample_index_after_split_" +
                            party_id, ids=[FedConfig.node_id, party_id])
            val_com[party_id] = \
                DualChannel(name="val_com_" + party_id,
                            ids=[FedConfig.node_id, party_id])
            restart_com[party_id] = \
                DualChannel(name="restart_com_" + party_id,
                            ids=[FedConfig.node_id, party_id])
            early_stop_com[party_id] = \
                DualChannel(name="early_stop_com_" + party_id,
                            ids=[FedConfig.node_id, party_id])
        self.channels["summed_grad_hess"] = summed_grad_hess_channs
        self.channels["min_split_info"] = min_split_info_channs
        self.channels["sample_index_after_split"] = sample_index_after_split_channs
        self.channels["val_com"] = val_com
        self.channels["restart_com"] = restart_com
        self.channels["early_stop_com"] = early_stop_com
        # Paillier: generate a private context, broadcast only the public part.
        if isinstance(self.xgb_config.encryption_param, (PlainParam, type(None))):
            self.private_context = None
        elif isinstance(self.xgb_config.encryption_param, PaillierParam):
            self.private_context = Paillier.context(self.xgb_config.encryption_param.key_bit_size,
                                                    self.xgb_config.encryption_param.djn_on)
            self.public_context = self.private_context.to_public()
            self.channels["encryption_context"].broadcast(
                self.public_context.serialize(), use_pickle=False)
        else:
            raise TypeError(
                f"Encryption param type {type(self.xgb_config.encryption_param)} not valid.")
        self.es = earlyStopping(key=self.xgb_config.early_stopping_param["key"],
                                patience=self.xgb_config.early_stopping_param["patience"],
                                delta=self.xgb_config.early_stopping_param["delta"])
        self.best_round = -1
        self.best_prediction_val = None
        self.best_prediction_train = None
        # Schema of input features ("y"/"id" excluded) recorded for model export.
        if self.train_features is not None:
            input_schema = ','.join([_ for _ in self.train_features.columns if _ not in set(["y", "id"])])
        else:
            input_schema = ""
        self.export_conf = [{
            "class_name": "VerticalXGBooster",
            "identity": self.identity,
            "filename": self.output.get("proto_model", {}).get("name", ''),
            # "filename": self.output.get("proto_model", {"name": "vertical_xgboost_guest.pmodel"})["name"],
            "input_schema": input_schema,
            "version": '1.4.0'
        }]
def _sync_config(self, config):
sync_rule = {
"train_info": {
"interaction_params": All(),
"train_params": {
"lossfunc": All(),
"num_trees": All(),
"num_bins": All(),
"batch_size_val": All(),
"downsampling": {
"row": {
"run_goss": All()
}
},
"encryption": All()
}
}
}
config_to_sync = get_matched_config(config, sync_rule)
self.channels["sync"].broadcast(config_to_sync)
    def fit(self):
        """Train the vertical XGBoost model as the label-holding party.

        Protocol per tree: sample columns, train one decision tree jointly with the
        trainers, broadcast restart/early-stop flags, update train/val predictions,
        and checkpoint periodically. Communication order with the trainer parties
        (restart_com, early_stop_com, val_com) must not be changed.
        """
        # Collect remote feature-name maps from every trainer and merge them.
        f_names = self.channels["sync"].collect()
        self.remote_f_names = {}
        for name_dict in f_names:
            self.remote_f_names.update(name_dict)
        self.check_dataset()
        boosting_tree = BoostingTree()
        # train_y_pred_primitive, tree_list = np.zeros_like(self.train_label), []
        # "primitive" = raw margin before the loss's link function is applied.
        train_y_pred_primitive = np.zeros_like(self.train_label)
        val_y_pred_primitive = np.zeros_like(self.val_label)
        loss_inst = get_xgb_loss_inst(
            list(self.xgb_config.loss_param.keys())[0])
        train_y_pred, val_y_pred = loss_inst.predict(
            train_y_pred_primitive), loss_inst.predict(val_y_pred_primitive)
        for tree_idx in range(1, self.xgb_config.num_trees+1):
            logger.info("Tree {} start training.".format(tree_idx))
            # 0: no need to restart, 1: restart, 2: max number of try reached
            restart_status = 1
            while True:
                # train section: resample columns and fit a single tree.
                sampled_features, feature_id_mapping = self.col_sample()
                cat_columns_after_sampling = list(filter(
                    lambda x: feature_id_mapping[x] in self.cat_columns, list(feature_id_mapping.keys())))
                split_points_after_sampling = [
                    self.split_points[feature_id_mapping[k]] for k in feature_id_mapping.keys()]
                trainer = VerticalDecisionTreeLabelTrainer(tree_param=self.xgb_config,
                                                           y=self.train_label,
                                                           y_pred=train_y_pred,
                                                           features=sampled_features,
                                                           cat_columns=cat_columns_after_sampling,
                                                           split_points=split_points_after_sampling,
                                                           channels=self.channels,
                                                           encryption_context=self.private_context,
                                                           feature_id_mapping=feature_id_mapping,
                                                           tree_index=tree_idx)
                tree = trainer.fit()
                # A tree whose root is a leaf learned nothing: retry with a new
                # column sample, or give up after `patience` consecutive failures.
                if not tree.root_node.is_leaf:
                    restart_status = 0
                else:
                    if self.xgb_config.early_stopping_param["patience"] <= 0:
                        # if not set patience, terminate immediately
                        restart_status = 2
                    else:
                        self.es.counter += 1
                        if self.es.counter == self.xgb_config.early_stopping_param["patience"]:
                            restart_status = 2
                # Every trainer must learn the restart decision before we proceed.
                for party_id in FedConfig.get_trainer():
                    self.channels["restart_com"][party_id].send(restart_status)
                if restart_status != 1:
                    break
                logger.info(f"label trainer tree {tree_idx} training restart.")
            if restart_status == 2:
                logger.info("label trainer early stopped because a tree's root is leaf, best round: {}.".format(
                    self.best_round))
                break
            self.update_feature_importance(trainer.get_feature_importance())
            # With goss the stored sample indices are subsampled, so re-predict on
            # the full train set; otherwise leaf weights can be applied in place.
            if self.xgb_config.run_goss:
                train_y_pred_primitive += self.predict_on_tree(
                    tree, self.train_dataset) * self.xgb_config.learning_rate
            else:
                for _, node in tree.nodes.items():
                    if node.is_leaf:
                        train_y_pred_primitive[node.sample_index] += node.weight * \
                            self.xgb_config.learning_rate
            train_y_pred = loss_inst.predict(train_y_pred_primitive)
            if self.interaction_params.get("echo_training_metrics"):
                train_loss = loss_inst.cal_loss(
                    self.train_label, train_y_pred_primitive, after_prediction=False)
                self._calc_metrics(self.train_label, train_y_pred, tree_idx, stage="train", loss={
                    loss_inst.name: train_loss})
            tree.clear_training_info()
            boosting_tree.append(tree=tree,
                                 lr=self.xgb_config.learning_rate,
                                 max_depth=self.xgb_config.max_depth)
            logger.info("Tree {} training done.".format(tree_idx))
            # validation section
            logger.info("Validation on tree {} start.".format(tree_idx))
            val_y_pred_primitive += self.predict_on_tree(
                tree, self.val_dataset) * self.xgb_config.learning_rate
            val_y_pred = loss_inst.predict(val_y_pred_primitive)
            val_loss = loss_inst.cal_loss(
                self.val_label, val_y_pred_primitive, after_prediction=False)
            metric = self._calc_metrics(self.val_label, val_y_pred, tree_idx, stage="val",
                                        loss={loss_inst.name: val_loss})
            logger.info("Validation on tree {} done.".format(tree_idx))
            if self.xgb_config.early_stopping_param["patience"] > 0:
                early_stop_flag, save_flag = self.es(metric)
            else:
                early_stop_flag, save_flag = False, True
            if save_flag:
                # self.best_round = tree_idx + 1
                self.best_round = tree_idx
                self.best_prediction_train = copy.deepcopy(train_y_pred)
                self.best_prediction_val = copy.deepcopy(val_y_pred)
            # Trainers block on this flag each round; always send it.
            for party_id in FedConfig.get_trainer():
                self.channels["early_stop_com"][party_id].send(early_stop_flag)
            if early_stop_flag:
                logger.info(
                    "label trainer early stopped. best round: {}.".format(self.best_round))
                break
            # if self.interaction_params.get("save_frequency") > 0 and (tree_idx + 1) % self.interaction_params.get("save_frequency") == 0:
            if self.interaction_params.get("save_frequency") > 0 and tree_idx % self.interaction_params.get("save_frequency") == 0:
                # self.save(boosting_tree, epoch=tree_idx+1)
                self.save(boosting_tree, epoch=tree_idx)
                self._write_prediction(
                    # self.train_label, train_y_pred, self.train_ids, epoch=tree_idx + 1)
                    self.train_label, train_y_pred, self.train_ids, epoch=tree_idx)
                self._write_prediction(
                    # self.val_label, val_y_pred, self.val_ids, epoch=tree_idx + 1, stage='val')
                    self.val_label, val_y_pred, self.val_ids, epoch=tree_idx, stage='val')
            # add metrics during training for plot
            # NOTE(review): train_loss is only assigned when echo_training_metrics
            # is truthy — this line raises NameError otherwise; confirm intended.
            self._write_loss(train_loss, val_loss, tree_idx)
        # update the progress of 100 to show the training is finished
        ProgressCalculator.finish_progress()
        # model preserve
        if self.xgb_config.early_stopping_param["patience"] <= 0:
            self.best_round = len(boosting_tree)
            self.best_prediction_train = copy.deepcopy(train_y_pred)
            self.best_prediction_val = copy.deepcopy(val_y_pred)
        else:
            logger.info("num trees: %d, best: %d" % (len(boosting_tree), self.best_round))
        if boosting_tree.trees:
            logger.info("save")
            # self.save(boosting_tree, final=True)
            if self.best_round <= 0:
                self.best_round = len(boosting_tree)
            self.save(boosting_tree[:self.best_round], final=True)
            logger.info('_write_prediction train')
            self._write_prediction(
                self.train_label, train_y_pred, self.train_ids, final=True)
            logger.info('_write_prediction val')
            self._write_prediction(
                self.val_label, val_y_pred, self.val_ids, final=True, stage='val')
            logger.info("Writing roc data...")
            self._write_roc_data(
                self.train_label, train_y_pred, self.val_label, val_y_pred)
            logger.info("Writing ks data...")
            self._write_ks_data(self.train_label, train_y_pred,
                                self.val_label, val_y_pred)
            logger.info("Writing lift and gain data...")
            self._write_lift_gain_data(
                self.train_label, train_y_pred, self.val_label, val_y_pred
            )
            logger.info("Writing pr curve data...")
            self._write_pr_data(
                self.train_label, train_y_pred, self.val_label, val_y_pred)
            self._write_feature_importance()
        else:
            # NOTE(review): "ture" is a typo for "turn" in this user-facing message.
            logger.error("Model is none, ture off run_goss (false) and downsampling (1) please.")
            raise SystemError(
                "Model is none, ture off run_goss (false) and downsampling (1) please.")
    def save(self, boosting_tree: BoostingTree, epoch: Optional[int] = None, final: bool = False):
        """Persist the model (JSON and/or proto form) plus feature importance.

        Args:
            boosting_tree: trees accumulated so far (caller truncates to best_round
                for the final save).
            epoch: tree index appended to checkpoint file names; unused when final.
            final: True for the end-of-training save, which also writes the
                stage model config.
        """
        if final:
            save_model_config(stage_model_config=self.export_conf,
                              save_path=self.output.get("path"))
        save_dir = self.output.get("path")
        # Ensure the output directory exists before any file is written.
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        # dump out ks plot
        suggest_threshold = 0.5
        # if "ks" in self.xgb_config.metrics or "auc_ks" in self.xgb_config.metrics:
        #     # tc = ThresholdCutter(os.path.join(save_dir, "ks_plot_valid.csv"))
        #     tc = ThresholdCutter(os.path.join(
        #         save_dir, self.output.get("ks_plot_val")["name"]))
        #     # tc.cut_by_value(self.val_label, self.best_prediction_val)
        #     # suggest_threshold = float(tc.bst_threshold)
        #     # tc.save()
        #     if final:
        #         self.val_ks_metrics = tc.metrics
        #         self.val_ks_bst_threshold = tc.bst_threshold
        #         self.val_ks_bst_score = tc.bst_score
        #     if self.interaction_params.get("echo_training_metrics"):
        #         tc = ThresholdCutter(os.path.join(
        #             save_dir, self.output.get("ks_plot_train")["name"]))
        #         # tc.cut_by_value(self.train_label, self.best_prediction_train)
        #         # tc.save()
        #         if final:
        #             self.train_ks_metrics = tc.metrics
        #             self.train_ks_bst_threshold = tc.bst_threshold
        #             self.train_ks_bst_score = tc.bst_score
        model_name = self.output.get("model", {}).get("name")
        proto_name = self.output.get("proto_model", {}).get("name")
        # JSON form of the model (human-readable checkpoint).
        if model_name:
            # model_dict = boosting_tree[:self.best_round].to_dict(
            #     suggest_threshold, compute_group=True)
            model_dict = boosting_tree.to_dict(suggest_threshold, compute_group=True)
            ModelIO.save_json_model(model_dict, save_dir, model_name, epoch=epoch, version='1.4.0')
        # Protobuf form; checkpoints get an "_epoch_N" suffix, the final save does not.
        if proto_name:
            # TODO: temp
            model_name_list = self.output.get("proto_model")["name"].split(".")
            name_prefix, name_postfix = ".".join(
                model_name_list[:-1]), model_name_list[-1]
            if not final and epoch:
                new_model_name = name_prefix + \
                    "_epoch_{}".format(epoch) + "." + name_postfix
            else:
                new_model_name = name_prefix + "." + name_postfix
            model_path = os.path.join(save_dir, new_model_name)
            # xgb_output = boosting_tree[:self.best_round].to_proto(
            #     suggest_threshold, compute_group=True)
            xgb_output = boosting_tree.to_proto(suggest_threshold, compute_group=True)
            with open(model_path, 'wb') as f:
                f.write(xgb_output)
            logger.info("model saved as: {}.".format(model_path))
        self.make_readable_feature_importance(
            os.path.join(save_dir, self.output.get("feature_importance")["name"]))
def make_readable_feature_importance(self, file_name):
with open(file_name, "w") as f:
f.write("owner_id,fid,importance\n")
normalizer = np.sum([_.get()
for _ in self.feature_importances_.values()])
for k, v in sorted(self.feature_importances_.items(), key=lambda d: d[1], reverse=True):
f.write("%s,%s,%.6g\n" % (k[0], k[1], v.get() / normalizer))
def _make_indicator_for_prediction(self, tree: Tree, feature: np.ndarray):
indicator = {}
for node_id, node in tree.nodes.items():
if not node.is_leaf and node.split_info.owner_id == self.party_id:
feature_idx = node.split_info.feature_idx
data = feature[:, feature_idx]
if node.split_info.is_category:
indicator[node_id] = np.isin(
data, node.split_info.left_cat)
else:
indicator[node_id] = (data <= node.split_info.split_point)
return indicator
def _gen_prediction(self, tree: Tree, indicator: Dict[str, np.ndarray], feature: np.ndarray):
prediction = np.zeros((len(feature),), dtype=np.float32)
depth = 0
sample_in_node = {}
while True:
node_list = tree.search_nodes(depth)
if not node_list:
break
for node in node_list:
if node.is_leaf:
prediction[sample_in_node[node.id]] = node.weight
else:
if depth == 0:
sample_in_node[node.left_node_id] = np.where(
indicator[node.id] == 1)[0]
sample_in_node[node.right_node_id] = np.where(
indicator[node.id] == 0)[0]
else:
sample_in_node[node.left_node_id] = np.intersect1d(
sample_in_node[node.id], np.where(indicator[node.id] == 1)[0])
sample_in_node[node.right_node_id] = np.intersect1d(
sample_in_node[node.id], np.where(indicator[node.id] == 0)[0])
depth += 1
return prediction
    def predict_on_tree(self, tree: Tree, data_iterator: NdarrayIterator) -> np.ndarray:
        """Predict raw leaf weights for one tree over a batched dataset.

        Local split masks are computed in-process while each remote party's masks
        are received over its val_com channel; the work is overlapped with a
        thread pool (one worker per remote party plus one for the local masks).
        """
        prediction = np.zeros((len(data_iterator.data),), dtype=np.float32)
        indicator = {}

        # Local go-left masks for split nodes owned by this party.
        def _update_local(tree, data):
            res = self._make_indicator_for_prediction(tree, data)
            return res

        # Remote masks arrive bit-packed; unpack and trim to the batch length.
        def _update_remote(channel, len_data):
            remote_indicator = channel.recv()
            res = {k: np.unpackbits(v)[:len_data]
                   for k, v in remote_indicator.items()}
            return res
        thread_pool = ThreadPool(len(self.channels["val_com"]) + 1)
        for i, data in enumerate(data_iterator):
            indicator = {}
            threads = []
            threads.append(thread_pool.apipe(_update_local, tree, data))
            for party_id in self.channels["val_com"]:
                threads.append(thread_pool.apipe(
                    _update_remote, self.channels["val_com"][party_id], len(data)))
            for t in threads:
                indicator.update(t.get())
            prediction[i * data_iterator.bs: (
                i + 1) * data_iterator.bs] = self._gen_prediction(tree, indicator, data)
        return prediction
# Non-parallelized version
# def predict_on_tree(self, tree: Tree, data_iterator: NdarrayIterator) -> np.ndarray:
# prediction = np.zeros((len(data_iterator.data),), dtype=np.float32)
# for i, data in enumerate(data_iterator):
# indicator = {}
# indicator.update(self._make_indicator_for_prediction(tree, data))
# for party_id in self.channels["val_com"]:
# remote_indicator = self.channels["val_com"][party_id].recv()
# indicator.update({k: np.unpackbits(v)[:len(data)] for k, v in remote_indicator.items()})
# prediction[i * data_iterator.bs: (i + 1) * data_iterator.bs] = self._gen_prediction(tree, indicator, data)
# return prediction
    def predict_on_boosting_tree(self, boosting_tree: BoostingTree, data_iterator: NdarrayIterator) -> np.ndarray:
        """Predict raw margins for the whole ensemble over a batched dataset.

        Masks for ALL trees are gathered per batch (local + one recv per remote
        party), then each tree's contribution is accumulated scaled by its
        per-tree learning rate.
        """
        prediction = np.zeros((len(data_iterator.data),), dtype=np.float32)

        # Local go-left masks for every tree in the ensemble.
        def _update_local(trees, data):
            res = {}
            for tree in trees:
                res.update(self._make_indicator_for_prediction(tree, data))
            return res

        # Remote masks arrive bit-packed; unpack and trim to the batch length.
        def _update_remote(channel, len_data):
            remote_indicator = channel.recv()
            res = {k: np.unpackbits(v)[:len_data]
                   for k, v in remote_indicator.items()}
            return res
        thread_pool = ThreadPool(len(self.channels["val_com"]) + 1)
        for i, data in enumerate(data_iterator):
            indicator = {}
            threads = []
            threads.append(thread_pool.apipe(
                _update_local, boosting_tree.trees, data))
            for party_id in self.channels["val_com"]:
                threads.append(thread_pool.apipe(
                    _update_remote, self.channels["val_com"][party_id], len(data)))
            for t in threads:
                indicator.update(t.get())
            for j, tree in enumerate(boosting_tree.trees):
                prediction[i * data_iterator.bs: (i + 1) * data_iterator.bs] += \
                    self._gen_prediction(
                        tree, indicator, data) * boosting_tree.lr[j]
        return prediction
# Non-parallelized version
# def predict_on_boosting_tree(self, boosting_tree: BoostingTree, data_iterator: NdarrayIterator) -> np.ndarray:
# prediction = np.zeros((len(data_iterator.data),), dtype=np.float32)
# for i, data in enumerate(data_iterator):
# indicator = {}
# for tree in boosting_tree.trees:
# indicator.update(self._make_indicator_for_prediction(tree, data))
# for party_id in self.channels["val_com"]:
# remote_indicator = self.channels["val_com"][party_id].recv()
# indicator.update({k: np.unpackbits(v)[:len(data)] for k, v in remote_indicator.items()})
# for j, tree in enumerate(boosting_tree.trees):
# prediction[i * data_iterator.bs: (i + 1) * data_iterator.bs] += \
# self._gen_prediction(tree, indicator, data) * boosting_tree.lr[j]
# return prediction
def update_feature_importance(self, tree_feature_importance):
for (owner_name, fid) in tree_feature_importance:
if owner_name == FedConfig.node_name:
f_name = self.train_names[fid]
else:
f_name = self.remote_f_names[owner_name][fid]
if (owner_name, f_name) not in self.feature_importances_:
self.feature_importances_[
(owner_name, f_name)] = tree_feature_importance[(owner_name, fid)]
else:
self.feature_importances_[
(owner_name, f_name)] += tree_feature_importance[(owner_name, fid)]
# if (owner_id, fid) not in self.feature_importances_:
# self.feature_importances_[
# (owner_id, fid)] = tree_feature_importance[(owner_id, fid)]
# else:
# self.feature_importances_[
# (owner_id, fid)] += tree_feature_importance[(owner_id, fid)]
logger.debug("cur feature importance {}".format(
self.feature_importances_))
def load_model(self):
pretrain_path = self.input.get("pretrained_model", {}).get("path", '')
model_name = self.input.get("pretrained_model", {}).get("name", '')
model_path = Path(
pretrain_path, model_name
)
suffix = model_name.split(".")[-1]
if suffix != "pmodel":
model_dict = ModelIO.load_json_model(model_path)
boosting_tree = BoostingTree.from_dict(model_dict)
else:
with open(model_path, 'rb') as f:
byte_str = f.read()
boosting_tree = BoostingTree.from_proto(byte_str)
return boosting_tree
def check_dataset(self):
shapes = self.channels["check_dataset_com"].collect()
if self.train_dataset is not None:
m = len(self.train_ids)
n = len(self.train_features.columns)
for d in shapes:
if d["train"][0] != m:
raise ValueError(
"Lengths of the train set mismatched: %d, %d." % (d["train"][0], m))
n += d["train"][1]
if n <= 0:
raise ValueError(
"Number of the feature is zero. Stop training.")
if self.val_dataset is not None:
m = len(self.val_ids)
n = len(self.val_features.columns)
for d in shapes:
if d["valid"][0] != m:
raise ValueError(
"Lengths of the valid set mismatched: %d, %d." % (d["valid"][0], m))
n += d["valid"][1]
if n <= 0:
raise ValueError(
"Number of the feature is zero. Stop training.")
if self.test_dataset is not None:
m = len(self.test_ids)
n = len(self.test_features.columns)
for d in shapes:
if d["test"][0] != m:
raise ValueError(
"Lengths of the test set mismatched: %d, %d." % (d["test"][0], m))
n += d["test"][1]
if n <= 0:
raise ValueError(
"Number of the feature is zero. Stop predicting.")
else:
if len(shapes) > 0 and "test" in shapes[0]:
m = shapes[0]["test"][0]
n = 0
for d in shapes:
if d["test"][0] != m:
raise ValueError("Lengths of the test set mismatched.")
n += d["test"][1]
if n <= 0:
raise ValueError(
"Number of the feature is zero. Stop predicting.")
else:
self.test_dataset = NdarrayIterator(
np.zeros((m, 0)), self.bs)
self.test_ids = np.arange(m)
    def predict(self):
        """Run inference on the test set, share predictions with trainers, and write a CSV.

        Side effects: scatters predictions back over the sync channel for any
        output keys the trainers requested, and writes "id,pred" rows to the
        configured testset file when an output path is set.
        """
        # Each trainer sends a dict of output keys it wants filled in.
        out_dict_list = self.channels["sync"].collect()
        self.check_dataset()
        boosting_tree = self.load_model()
        test_y_pred_primitive = self.predict_on_boosting_tree(boosting_tree=boosting_tree,
                                                              data_iterator=self.test_dataset)
        loss_inst = get_xgb_loss_inst(boosting_tree.loss_method)
        test_y_pred = loss_inst.predict(test_y_pred_primitive)
        output = {
            "testset": test_y_pred
        }
        # Fill the requested keys and send each trainer its dict back.
        for out_keys_dict in out_dict_list:
            for key in out_keys_dict:
                if key in output:
                    out_keys_dict[key] = output["testset"]
        self.channels["sync"].scatter(out_dict_list)
        save_path = self.output.get("path", '')
        if save_path:
            if not os.path.exists(save_path):
                os.makedirs(save_path)
            file_path = Path(save_path, self.output.get("testset", {}).get("name", ''))
            # NOTE(review): a Path instance is always truthy, so this check never
            # filters an empty name — confirm intended.
            if file_path:
                logger.info("predicted results saved at {}".format(file_path))
                pd.DataFrame({"id": self.test_ids, "pred": test_y_pred}).to_csv(
                    file_path, float_format="%.6g", index=False, header=True
                )
| 30,579 | 44.57377 | 139 | py |
XFL | XFL-master/python/algorithm/framework/vertical/xgboost/__init__.py | 0 | 0 | 0 | py | |
XFL | XFL-master/python/algorithm/framework/vertical/xgboost/decision_tree_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import math
from typing import Dict, Optional, Union
import numpy as np
import pandas as pd
import ray
from algorithm.core.encryption_param import PaillierParam, PlainParam
from algorithm.core.tree.big_feature import Feature
from algorithm.core.tree.tree_param import XGBTreeParam
from algorithm.core.tree.tree_structure import Node, SplitInfo
from common.communication.gRPC.python.channel import BroadcastChannel, DualChannel
from common.crypto.paillier.paillier import Paillier, PaillierContext
from common.crypto.paillier.utils import get_core_num
from common.utils.constants import PAILLIER, PLAIN
from common.utils.logger import logger
try:
from ray.internal.internal_api import free
except Exception:
from ray._private.internal_api import free
from service.fed_config import FedConfig
from .debug_params import EMBEDING
class VerticalDecisionTreeTrainer(object):
    def __init__(self,
                 tree_param: XGBTreeParam,
                 features: pd.DataFrame,
                 cat_columns: list,
                 split_points: np.ndarray,
                 channels: Dict[str, Union[BroadcastChannel, DualChannel]],
                 encryption_context: Optional[PaillierContext] = None,
                 feature_id_mapping: Optional[Dict[int, int]] = None,
                 tree_index: Optional[int] = None):
        """Set up a trainer-side decision tree and receive the (possibly encrypted) grad/hess.

        Args:
            tree_param: tree hyper-parameters, including the encryption settings.
            features: this party's (column-sampled) feature table.
            cat_columns: indices of categorical columns within `features`.
            split_points: per-feature candidate split values.
            channels: named communication channels shared with the label trainer.
            encryption_context: Paillier public context (None for plain training).
            feature_id_mapping: sampled-column index -> original column index.
            tree_index: index of this tree inside the boosting ensemble (logging only).
        """
        logger.info(f"Trainer decision tree {tree_index} initialize start.")
        if tree_param.encryption_param.method not in [PAILLIER, PLAIN]:
            raise ValueError(f"Encryption method {tree_param.encryption_param.method} not supported.")
        self.tree_param = tree_param
        self.features = features
        self.cat_columns = cat_columns
        self.split_points = split_points
        self.party_id = FedConfig.node_id
        self.max_num_cores = get_core_num(tree_param.max_num_cores)
        self.tree_index = tree_index
        # Channels to/from the label trainer.
        self.individual_grad_hess: BroadcastChannel = channels["individual_grad_hess"]
        self.tree_node_chann: BroadcastChannel = channels["tree_node"]
        self.summed_grad_hess_chann: DualChannel = channels["summed_grad_hess"]
        self.min_split_info_chann: DualChannel = channels["min_split_info"]
        self.sample_index_after_split_chann: DualChannel = channels["sample_index_after_split"]
        self.encryption_param = self.tree_param.encryption_param
        self.pub_context = encryption_context
        self.feature_id_mapping = feature_id_mapping
        # Receive per-sample gradients/hessians. Plain: raw values; Paillier with
        # EMBEDING: grad and hess packed into one ciphertext; otherwise two.
        if isinstance(self.encryption_param, PlainParam):
            self.grad, self.hess = self.individual_grad_hess.recv(use_pickle=True)
        elif isinstance(self.encryption_param, PaillierParam):
            if EMBEDING:
                self.grad_hess = self.individual_grad_hess.recv(use_pickle=True)
                self.grad_hess = Paillier.ciphertext_from(self.pub_context, self.grad_hess, compression=False)
            else:
                self.grad, self.hess = self.individual_grad_hess.recv(use_pickle=True)
                self.grad = Paillier.ciphertext_from(self.pub_context, self.grad, compression=False)  # ciphertext
                self.hess = Paillier.ciphertext_from(self.pub_context, self.hess, compression=False)
        else:
            raise ValueError("Encryption param not supported.")
        logger.info(f"Trainer decision tree {tree_index} initialize finished.")
def fit(self) -> Dict[str, Node]:
logger.info(f"Decision tree {self.tree_index} training start..")
nodes = {}
count = 0
while True:
node: Node = self.tree_node_chann.recv(use_pickle=True)
if node is None:
break
logger.info(f"Node {node.id} training start..")
if count == 0:
if node.sample_index is None:
node.sample_index = range(self.features.shape[0])
self.sample_index = node.sample_index
if isinstance(self.encryption_param, PlainParam):
self.big_feature = Feature.create(values=self.features.iloc[node.sample_index, :],
sample_index=node.sample_index,
grad=self.grad,
hess=self.hess)
else:
if EMBEDING:
self.big_feature = Feature.create(values=self.features.iloc[node.sample_index, :],
sample_index=node.sample_index,
grad_hess=self.grad_hess)
else:
self.big_feature = Feature.create(values=self.features.iloc[node.sample_index, :],
sample_index=node.sample_index,
grad=self.grad,
hess=self.hess)
big_feature = self.big_feature
else:
big_feature = self.big_feature.slice_by_sample_index(node.sample_index)
gc.collect()
count += 1
logger.info(f"Node {node.id} finish preparing data.")
def cal_grad_hess_hist_apart(col_name: str):
res = big_feature.data.groupby([col_name])[['xfl_grad', 'xfl_hess']].agg({'count', 'sum'})
return res
logger.info(f"Node {node.id} calculating grad hess hist start..")
send_times = math.ceil(len(big_feature.feature_columns) / self.tree_param.col_batch)
res_hist_list = []
if isinstance(self.encryption_param, PaillierParam):
if EMBEDING:
for i in range(send_times):
# split by features
cat_index = list(
set(self.cat_columns).intersection(set(range(i * self.tree_param.col_batch, (i + 1) * self.tree_param.col_batch)))
)
num = int(math.ceil(big_feature.data.shape[0] / self.tree_param.row_batch))
for j in range(num):
data_id = ray.put(big_feature.data.iloc[self.tree_param.row_batch * j: self.tree_param.row_batch * (j + 1), :])
@ray.remote
def cal_grad_hess_hist_embeding(col_name: str):
res = ray.get(data_id).groupby([col_name])['xfl_grad_hess'].agg({'count', 'sum'})
return res
ray_tasks = []
for col_name in big_feature.feature_columns[i * self.tree_param.col_batch: (i + 1) * self.tree_param.col_batch]:
ray_tasks.append(cal_grad_hess_hist_embeding.remote(col_name))
b = ray.get(ray_tasks)
free(data_id)
free(ray_tasks)
if j == 0:
res = b
else:
b_id = ray.put(b)
res_id = ray.put(res)
@ray.remote
def merge_embeding(k):
res = ray.get(res_id)
b = ray.get(b_id)
r = pd.merge(res[k], b[k], how='outer', left_index=True, right_index=True).fillna(0)
r = pd.Series(b[k].columns).apply(lambda x: r[x+'_x'] + r[x+'_y']).T
r.columns = list(b[k].columns)
return r
ray_tasks = [merge_embeding.remote(k) for k in range(len(b))]
res = ray.get(ray_tasks)
free(b_id)
free(res_id)
free(ray_tasks)
gc.collect()
res_hist_partial_list = res
hist_list = [(res_hist['sum'].to_numpy(), res_hist['count'].to_numpy()) for res_hist in res_hist_partial_list]
if (i + 1) == send_times:
# 【stop_flag, hist_list, index of category feature(in binary form)]
self.summed_grad_hess_chann.send([False, hist_list, cat_index], use_pickle=True)
else:
self.summed_grad_hess_chann.send([True, hist_list, cat_index], use_pickle=True)
res_hist_list += res_hist_partial_list
gc.collect()
else:
for i in range(send_times):
cat_index = list(
set(self.cat_columns).intersection(set(range(i * self.tree_param.col_batch, (i + 1) * self.tree_param.col_batch)))
)
num = int(math.ceil(big_feature.data.shape[0] / self.tree_param.row_batch))
for j in range(num):
data_id = ray.put(big_feature.data.iloc[self.tree_param.row_batch * j: self.tree_param.row_batch * (j + 1), :])
@ray.remote
def cal_grad_hess_hist(col_name: str):
res = ray.get(data_id).groupby([col_name])[['xfl_grad', 'xfl_hess']].agg({'count', 'sum'})
return res
ray_tasks = []
for col_name in big_feature.feature_columns[i * self.tree_param.col_batch: (i + 1) * self.tree_param.col_batch]:
ray_tasks.append(cal_grad_hess_hist.remote(col_name))
b = ray.get(ray_tasks)
free(data_id)
free(ray_tasks)
if j == 0:
res = b
else:
b_id = ray.put(b)
res_id = ray.put(res)
@ray.remote
def merge(k):
res = ray.get(res_id)
b = ray.get(b_id)
r = pd.merge(res[k], b[k], how='outer', left_index=True, right_index=True).fillna(0)
r = pd.Series(list(b[k].columns)).apply(lambda x: r[(x[0]+'_x', x[1])] + r[(x[0]+'_y', x[1])]).T
r.columns = pd.MultiIndex.from_tuples(b[k].columns)
return r
ray_tasks = [merge.remote(k) for k in range(len(b))]
res = ray.get(ray_tasks)
free(b_id)
free(res_id)
free(ray_tasks)
gc.collect()
res_hist_partial_list = res
hist_list = [(res_hist[('xfl_grad', 'sum')].to_numpy(),
res_hist[('xfl_hess', 'sum')].to_numpy(),
res_hist[('xfl_grad', 'count')].to_numpy()) for res_hist in res_hist_partial_list]
if (i + 1) == send_times:
self.summed_grad_hess_chann.send([False, hist_list, cat_index], use_pickle=True)
else:
self.summed_grad_hess_chann.send([True, hist_list, cat_index], use_pickle=True)
res_hist_list += res_hist_partial_list
gc.collect()
else:
cat_index = self.cat_columns
res_hist_list = pd.Series(big_feature.feature_columns).apply(cal_grad_hess_hist_apart)
hist_list = [(res_hist[('xfl_grad', 'sum')].to_numpy(),
res_hist[('xfl_hess', 'sum')].to_numpy(),
res_hist[('xfl_grad', 'count')].to_numpy()) for res_hist in res_hist_list]
self.summed_grad_hess_chann.send([False, hist_list, cat_index], use_pickle=True)
gc.collect()
logger.info(f"Node {node.id} calculating grad hess hist finished")
feature_idx, max_gain_index, left_cat = self.min_split_info_chann.recv(use_pickle=True)
if feature_idx == -1:
continue
if feature_idx in self.cat_columns:
if isinstance(self.encryption_param, PaillierParam) and EMBEDING:
left_cat_index = res_hist_list[feature_idx]['sum'].index[left_cat]
else:
left_cat_index = res_hist_list[feature_idx][('xfl_grad', 'sum')].index[left_cat]
left_cat_values_ori = [self.split_points[feature_idx][index] for index in left_cat_index]
left_cat_values = []
for cat in left_cat_values_ori:
if isinstance(cat, list):
left_cat_values += cat
else:
left_cat_values.append(cat)
split_info = SplitInfo(owner_id=self.party_id,
feature_idx=self.feature_id_mapping[feature_idx].item(),
is_category=True,
split_point=None,
left_cat=left_cat_values)
if isinstance(self.encryption_param, PaillierParam) and EMBEDING:
left_sample_index = big_feature.data[big_feature.data.iloc[:, feature_idx + 2].isin(left_cat)]['xfl_id'].tolist()
right_sample_index = big_feature.data[big_feature.data.iloc[:, feature_idx + 2].isin(left_cat)]['xfl_id'].tolist()
else:
left_sample_index = big_feature.data[big_feature.data.iloc[:, feature_idx + 3].isin(left_cat)]['xfl_id'].tolist()
right_sample_index = big_feature.data[big_feature.data.iloc[:, feature_idx + 3].isin(left_cat)]['xfl_id'].tolist()
else:
if isinstance(self.encryption_param, PaillierParam) and EMBEDING:
# works when goss is true
split_point_index = int(res_hist_list[feature_idx]['sum'].index[max_gain_index])
else:
split_point_index = int(res_hist_list[feature_idx][('xfl_grad', 'sum')].index[max_gain_index])
# may not be necessary, just for safe
split_point_index = min(split_point_index, len(self.split_points[feature_idx]) - 1)
split_point = self.split_points[feature_idx][split_point_index]
split_info = SplitInfo(owner_id=self.party_id,
feature_idx=self.feature_id_mapping[feature_idx].item(),
is_category=False,
split_point=split_point,
left_cat=None)
if isinstance(self.encryption_param, PaillierParam) and EMBEDING:
left_sample_index = big_feature.data[big_feature.data.iloc[:, feature_idx + 2] <= split_point_index]['xfl_id'].tolist()
right_sample_index = big_feature.data[big_feature.data.iloc[:, feature_idx + 2] > split_point_index]['xfl_id'].tolist()
else:
left_sample_index = big_feature.data[big_feature.data.iloc[:, feature_idx + 3] <= split_point_index]['xfl_id'].tolist()
right_sample_index = big_feature.data[big_feature.data.iloc[:, feature_idx + 3] > split_point_index]['xfl_id'].tolist()
nodes[node.id] = Node(id=node.id, split_info=split_info, is_leaf=False)
self.sample_index_after_split_chann.send([left_sample_index, right_sample_index], use_pickle=True)
logger.info(f"Node {node.id} training finished.")
return nodes
| 17,678 | 53.733746 | 142 | py |
XFL | XFL-master/python/algorithm/framework/vertical/xgboost/debug_params.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Debug switch read by the xgboost trainers: when True, the Paillier path packs
# gradient and hessian into a single encrypted value ("embedded" cipher) instead
# of two separate ciphertexts. (Identifier spelling kept as-is — "EMBEDING" [sic]
# is imported elsewhere.)
EMBEDING = True
| 622 | 35.647059 | 74 | py |
XFL | XFL-master/python/algorithm/framework/vertical/xgboost/trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from pathlib import Path
from typing import Dict
import pandas as pd
import numpy as np
import ray
from algorithm.core.data_io import NdarrayIterator
from algorithm.core.encryption_param import PaillierParam, PlainParam
from algorithm.core.tree.tree_structure import Node, NodeDict
from common.communication.gRPC.python.channel import BroadcastChannel, DualChannel
from common.crypto.paillier.paillier import Paillier
from common.crypto.paillier.utils import get_core_num
from common.utils.logger import logger
from common.utils.utils import save_model_config, update_dict
from service.fed_config import FedConfig
from .base import VerticalXgboostBase
from .decision_tree_trainer import VerticalDecisionTreeTrainer
from service.fed_job import FedJob
from service.fed_node import FedNode
from common.utils.model_io import ModelIO
class VerticalXgboostTrainer(VerticalXgboostBase):
    """Label-free participant (trainer side) of vertical federated XGBoost.

    Holds feature columns but no labels. It exchanges per-node statistics
    with the label trainer over gRPC broadcast/dual channels while trees are
    built, assists validation/prediction by sending bit-packed split
    indicators, and persists its share of the tree structure (NodeDict).
    """

    def __init__(self, train_conf: dict, *args, **kwargs):
        """Set up channels, merge the broadcast config, and init crypto/ray.

        Args:
            train_conf: local training configuration; it is merged with the
                config received from the label trainer over the "sync"
                channel before the base class is initialized.

        Raises:
            TypeError: if the configured encryption param is neither
                PlainParam/None nor PaillierParam.
        """
        self.channels = dict()
        # The "sync" channel must exist first: the label trainer broadcasts
        # the shared config over it before anything else happens.
        self.channels["sync"] = BroadcastChannel(name="sync")
        conf = self._sync_config()
        update_dict(train_conf, conf)
        super().__init__(train_conf, is_label_trainer=False, *args, **kwargs)
        # Broadcast channels shared by all parties.
        self.channels["encryption_context"] = BroadcastChannel(
            name="encryption_context")
        self.channels["individual_grad_hess"] = BroadcastChannel(
            name="individual_grad_hess")
        self.channels["tree_node"] = BroadcastChannel(name="tree_node")
        self.channels["check_dataset_com"] = BroadcastChannel(
            name="check_dataset_com")
        # Point-to-point channels between this node and the label trainer;
        # names are suffixed with this node's id to keep them unique.
        self.channels["summed_grad_hess"] = DualChannel(name="summed_grad_hess_" + FedConfig.node_id,
                                                        ids=FedConfig.get_label_trainer() + [FedConfig.node_id])
        self.channels["min_split_info"] = DualChannel(name="min_split_info_" + FedConfig.node_id,
                                                      ids=FedConfig.get_label_trainer() + [FedConfig.node_id])
        self.channels["sample_index_after_split"] = DualChannel(name="sample_index_after_split_" + FedConfig.node_id,
                                                                ids=FedConfig.get_label_trainer() + [FedConfig.node_id])
        self.channels["val_com"] = DualChannel(name="val_com_" + FedConfig.node_id,
                                               ids=FedConfig.get_label_trainer() + [FedConfig.node_id])
        self.channels["restart_com"] = DualChannel(name="restart_com_" + FedConfig.node_id,
                                                   ids=FedConfig.get_label_trainer() + [FedConfig.node_id])
        self.channels["early_stop_com"] = DualChannel(name="early_stop_com_" + FedConfig.node_id,
                                                      ids=FedConfig.get_label_trainer() + [FedConfig.node_id])
        # In Paillier mode the label trainer broadcasts a serialized public
        # context; plain mode needs no context at all.
        if isinstance(self.xgb_config.encryption_param, (PlainParam, type(None))):
            self.public_context = None
        elif isinstance(self.xgb_config.encryption_param, PaillierParam):
            self.public_context = self.channels["encryption_context"].recv(
                use_pickle=False)
            self.public_context = Paillier.context_from(self.public_context)
        else:
            raise TypeError(
                f"Encryption param type {type(self.xgb_config.encryption_param)} not valid.")
        # "y" and "id" are bookkeeping columns, not model inputs.
        if self.train_features is not None:
            input_schema = ','.join([_ for _ in self.train_features.columns if _ not in set(["y", "id"])])
        else:
            input_schema = ""
        self.export_conf = [{
            "class_name": "VerticalXGBooster",
            "identity": self.identity,
            "filename": self.output.get("proto_model", {}).get("name", ''),
            "input_schema": input_schema,
        }]
        # Local ray pool for parallel work, capped by the configured core count.
        ray.init(num_cpus=get_core_num(self.xgb_config.max_num_cores),
                 ignore_reinit_error=True)

    def _sync_config(self):
        """Receive the training config broadcast by the label trainer."""
        config = self.channels["sync"].recv()
        return config

    def fit(self):
        """Train the boosting forest jointly with the label trainer.

        Per tree: (re)sample feature columns, run one
        VerticalDecisionTreeTrainer round, then obey the label trainer's
        restart signal (1 = retrain this tree, 2 = abort training because a
        tree's root became a leaf, anything else = accept). Accepted trees
        are merged into the NodeDict; validation indicators are streamed and
        an early-stop flag is received after each tree. The model is saved
        periodically and once more at the end.
        """
        self.channels["sync"].send({FedNode.node_name: self.train_names})
        self.check_dataset()
        # nodes_dict = {}
        node_dict = NodeDict()
        for tree_idx in range(1, self.xgb_config.num_trees+1):
            # training section
            logger.info("Tree {} start training.".format(tree_idx))
            restart_status = 0
            while True:
                sampled_features, feature_id_mapping = self.col_sample()
                # col index in feature
                cat_columns_after_sampling = list(
                    filter(lambda x: feature_id_mapping[x] in self.cat_columns, list(feature_id_mapping.keys())))
                split_points_after_sampling = [self.split_points[feature_id_mapping[k]] for k in
                                               feature_id_mapping.keys()]
                trainer = VerticalDecisionTreeTrainer(tree_param=self.xgb_config,
                                                      features=sampled_features,
                                                      cat_columns=cat_columns_after_sampling,
                                                      split_points=split_points_after_sampling,
                                                      channels=self.channels,
                                                      encryption_context=self.public_context,
                                                      feature_id_mapping=feature_id_mapping,
                                                      tree_index=tree_idx)
                nodes = trainer.fit()
                # restart_status: 1 = retrain this tree, 2 = stop training
                # entirely, other values = accept the tree.
                restart_status = self.channels["restart_com"].recv()
                if restart_status != 1:
                    break
                logger.info(f"trainer tree {tree_idx} training restart.")
            logger.info("Tree {} training done.".format(tree_idx))
            if restart_status == 2:
                logger.info(
                    "trainer early stopped. because a tree's root is leaf.")
                break
            node_dict.update(nodes)
            # With GOSS enabled the train-set indicators must also be sent so
            # the label trainer can refresh predictions used for sampling.
            if self.xgb_config.run_goss:
                self.predict_on_tree(nodes, self.train_dataset)
            # valid section
            logger.info(
                "trainer: Validation on tree {} start.".format(tree_idx))
            self.predict_on_tree(nodes, self.val_dataset)
            if self.channels["early_stop_com"].recv():
                logger.info("trainer early stopped.")
                break
            logger.info("Validation on tree {} done.".format(tree_idx))
            # if self.interaction_params.get("save_frequency") > 0 and (tree_idx + 1) % self.interaction_params.get(
            #         "save_frequency") == 0:
            #     self.save(node_dict, epoch=tree_idx + 1)
            if self.interaction_params.get("save_frequency") > 0 and tree_idx % self.interaction_params.get(
                    "save_frequency") == 0:
                self.save(node_dict, epoch=tree_idx)
        # model preserve
        self.save(node_dict, final=True)
        ray.shutdown()

    def _make_indicator_for_prediction(self, nodes: Dict[str, Node], feature: np.ndarray):
        """Build per-node boolean "goes left" masks for the given feature rows.

        Categorical splits test membership in the node's left category set;
        numeric splits test value <= split_point.

        Args:
            nodes: node_id -> Node, each carrying split_info for a local feature.
            feature: 2-D array of feature values, rows = samples.

        Returns:
            dict mapping node_id -> boolean np.ndarray of length len(feature).
        """
        indicator = {}
        for node_id, node in nodes.items():
            feature_idx = node.split_info.feature_idx
            data = feature[:, feature_idx]
            if node.split_info.is_category:
                indicator[node_id] = np.isin(data, node.split_info.left_cat)
            else:
                indicator[node_id] = (data <= node.split_info.split_point)
        return indicator

    def predict_on_tree(self, nodes: Dict[str, Node], data_iterator: NdarrayIterator):
        """Stream split indicators for `nodes` to the label trainer, batch by batch.

        Masks are compressed with np.packbits before sending; the receiver
        is expected to unpack them with the known batch length.
        """
        for data in data_iterator:
            indicator = self._make_indicator_for_prediction(nodes, data)
            indicator = {k: np.packbits(v) for k, v in indicator.items()}
            self.channels["val_com"].send(indicator)

    def predict_on_boosting_tree(self, nodes: Dict[str, Node], data_iterator: NdarrayIterator):
        """Identical to predict_on_tree: all trees' nodes are sent in one pass."""
        self.predict_on_tree(nodes, data_iterator)

    def save(self, node_dict: NodeDict, epoch: int = None, final: bool = False):
        """Persist this party's tree nodes as JSON and/or protobuf.

        Args:
            node_dict: accumulated tree structure owned by this party.
            epoch: tree index for checkpoint naming (proto file gets an
                "_epoch_N" suffix unless `final`).
            final: when True, also writes the stage model config.
        """
        if final:
            save_model_config(stage_model_config=self.export_conf, save_path=self.output.get("path"))
        save_dir = self.output.get("path")
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        model_name = self.output.get("model", {}).get("name")
        proto_name = self.output.get("proto_model", {}).get("name")
        if model_name:
            out_dict = node_dict.to_dict()
            model_dict = {"nodes": out_dict}
            ModelIO.save_json_model(model_dict, save_dir, model_name, epoch=epoch, version='1.4.0')
        if proto_name:
            model_name_list = self.output.get("proto_model")["name"].split(".")
            name_prefix, name_postfix = ".".join(
                model_name_list[:-1]), model_name_list[-1]
            if not final and epoch:
                model_name = name_prefix + "_epoch_{}".format(epoch) + "." + name_postfix
            else:
                model_name = name_prefix + "." + name_postfix
            model_path = os.path.join(save_dir, model_name)
            xgb_output = node_dict.to_proto()
            with open(model_path, 'wb') as f:
                f.write(xgb_output)
            logger.info("model saved as: {}.".format(model_path))

    def load_model(self):
        """Load a pretrained NodeDict; format chosen by file suffix.

        Files ending in ".pmodel" are parsed as protobuf, anything else as
        the JSON model format.

        Returns:
            NodeDict reconstructed from the configured pretrained model file.
        """
        pretrain_path = self.input.get("pretrained_model", {}).get("path", '')
        model_name = self.input.get("pretrained_model", {}).get("name", '')
        model_path = Path(
            pretrain_path, model_name
        )
        suffix = model_name.split(".")[-1]
        if suffix != "pmodel":
            model_dict = ModelIO.load_json_model(model_path)
            node_dict = NodeDict.from_dict(model_dict["nodes"])
        else:
            with open(model_path, 'rb') as f:
                byte_str = f.read()
            node_dict = NodeDict.from_proto(byte_str)
        return node_dict

    def check_dataset(self):
        """Send (row count, column count) of each available split to the label trainer."""
        d = dict()
        if self.train_dataset is not None:
            d["train"] = len(self.train_ids), len(self.train_features.columns)
        if self.val_dataset is not None:
            d["valid"] = len(self.val_ids), len(self.val_features.columns)
        if self.test_dataset is not None:
            d["test"] = len(self.test_ids), len(self.test_features.columns)
        self.channels["check_dataset_com"].send(d)

    def predict(self):
        """Run inference on the test set and write received predictions to CSV.

        Sends the set of requested output keys to the label trainer, streams
        split indicators for the whole boosting tree, then receives the final
        predictions back over "sync" and saves an (id, pred) CSV for the
        "testset" output key.
        """
        out_dict = {key: None for key, value in self.train_conf.get("output", {}).items() if key != "path" and value.get("name")}
        self.channels["sync"].send(out_dict)
        self.check_dataset()
        node_dict = self.load_model()
        self.predict_on_boosting_tree(nodes=node_dict.nodes,
                                      data_iterator=self.test_dataset)
        out_dict = self.channels["sync"].recv()
        save_path = self.output.get("path", '')
        if save_path:
            if not os.path.exists(save_path):
                os.makedirs(save_path)
            for key in out_dict:
                file_path = Path(save_path, self.output[key]["name"])
                if key == "testset" and file_path:
                    logger.info("predicted results saved at {}".format(file_path))
                    pd.DataFrame({"id": self.test_ids, "pred": out_dict[key]}).to_csv(
                        file_path, float_format="%.6g", index=False, header=True
                    )
| 12,254 | 43.563636 | 129 | py |
XFL | XFL-master/python/algorithm/framework/vertical/binning_woe_iv/base.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import time
from pathlib import Path
import numpy as np
import pandas as pd
from common.utils.config_parser import TrainConfigParser
from common.utils.logger import logger
from sklearn.preprocessing import LabelEncoder
def fintech_cut(ser, bins, nan_l, method):
    """Bin a numeric series into integer-labelled buckets, isolating sentinel values.

    Values listed in ``nan_l`` are split off into their own bin (kept as the
    sentinel value itself); the remaining values are cut into ``bins`` buckets
    either by ``pd.cut`` ("equal_width") or ``pd.qcut`` ("equal_frequency").
    Bucket labels start at 1.

    Args:
        ser: pd.Series of numeric values; its name keys the returned mapping.
        bins: target number of buckets.
        nan_l: list of sentinel ("missing") values binned separately.
        method: "equal_width" or "equal_frequency".

    Returns:
        Tuple of (binned series aligned to ser's index,
                  sorted list of split points (outermost forced to +/-inf),
                  {ser.name: {bin_label: "(left, right]" range string}}).
    """
    # all labels begins with 1
    def rebin_freq_2(threshold, num):
        # Fallback bucketing when qcut collapsed to <= 2 distinct edges:
        # map a value onto one of at most three buckets.
        if num <= threshold[0]:
            return 1
        elif threshold[0] < num <= threshold[1]:
            return 2
        else:
            return 3

    def ret_bin(ser_, cut_v):
        # Normalize a (labelled series, bin edges) pair into
        # (integer-labelled series, edges with outer bounds at +/-inf).
        l_tmp = [float("%.6f" % i) for i in list(cut_v[1])]
        if len(cut_v[1]) > 2:
            cut_bins = [-np.inf] + l_tmp[1:-1] + [np.inf]
            if method == "equal_frequency":
                # qcut returns interval labels; re-encode them as 1..k ints.
                cut_ser = pd.DataFrame(LabelEncoder().fit_transform(
                    cut_v[0])).set_index(cut_v[0].index)[0]
                cut_ser = cut_ser + 1
            elif method == "equal_width":
                cut_ser = cut_v[0]
                cut_ser = cut_ser.cat.remove_unused_categories()
        else:
            # never equal_width
            cut_bins = [-np.inf] + [float("%.6f" % i)
                                    for i in l_tmp] + [np.inf]
            cut_ser = ser_.apply(lambda x: rebin_freq_2(cut_v[1], x))
        return cut_ser, cut_bins

    def range_map(nums, _tmp, i):
        # Render the half-open "(left, right]" range for the i-th used label.
        if i > 0:
            left = _tmp[nums[i - 1] - 1][1]
            right = _tmp[nums[i] - 1][1]
        else:
            # BUGFIX: this branch previously read the enclosing variable
            # `num` instead of the `nums` parameter; it only worked because
            # every call site happens to pass `num`.
            left = _tmp[nums[i] - 1][0]
            right = _tmp[nums[i] - 1][1]
        return f"({left}, {right}]", [left, right]

    # split nan and norm
    col_dict = {}
    ser_name = ser.name
    bins_ser = []
    ser_cut = ser.loc[ser[~ser.isin(nan_l)].index]
    nan_ind = ser[ser.isin(nan_l)].index
    ser_nan = ser.loc[nan_ind]
    if len(ser_nan) > 0:
        # All sentinel rows collapse to the first-seen sentinel value.
        nan_value = float("%.6f" % ser_nan.loc[np.min(nan_ind)])
        bins_ser.append(nan_value)
        ser_nan = pd.DataFrame(np.zeros(len(ser_nan)) +
                               nan_value).set_index(ser_nan.index)[0]
        col_dict[nan_value] = nan_value
    else:
        pass
    if len(ser_cut) > 0:
        # first cut
        cut_value = None
        if len(set(ser_cut)) > 1:
            if method == "equal_width":
                cut_value = pd.cut(ser_cut, bins, retbins=True, labels=[
                                   i + 1 for i in range(bins)])
            elif method == "equal_frequency":
                cut_value = pd.qcut(
                    ser_cut, bins, retbins=True, duplicates='drop')
        elif len(set(ser_cut)) == 1:
            # Degenerate column: a single distinct value gets a single bin.
            cut_value = (pd.Series(len(ser_cut)*[1]), list(set(ser_cut)))
        cut_ser_, cut_bin = ret_bin(ser_cut, cut_value)
        # label-range map
        tmp_ = [[cut_bin[i], cut_bin[i + 1]] for i in range(len(cut_bin) - 1)]
        num = sorted(set(cut_ser_))
        tt = []
        col_dict.update(dict(zip([num[i] for i in range(len(num))], [
            range_map(num, tmp_, i)[0] for i in range(len(num))])))
        for i in range(len(num)):
            tt += range_map(num, tmp_, i)[1]
        cut_bin_final = sorted(set(tt))
        # concat nan and cut
        ser = pd.concat([ser_nan, cut_ser_]).loc[ser.index]
        bins_ser = bins_ser + cut_bin_final
        # rename the rightest value to -inf/inf
        if bins_ser[-1] != np.inf:
            bins_ser = bins_ser[:-1] + [np.inf]
            col_dict[list(col_dict.keys(
            ))[-1]] = f"{col_dict[list(col_dict.keys())[-1]].split(' ')[0]} {np.inf}]"
        if bins_ser[0] != -np.inf and bins_ser[0] not in nan_l:
            bins_ser = [-np.inf] + bins_ser[1:]
            col_dict[list(col_dict.keys())[
                0]] = f"({-np.inf}, {col_dict[list(col_dict.keys())[0]].split(' ')[1]}]"
    else:
        # Only sentinel values present: pass them through unchanged.
        ser = ser_nan
    ser.name = ser_name
    return ser, bins_ser, {ser.name: col_dict}
class VerticalBinningWoeIvBase(TrainConfigParser):
    """Common base of the vertical binning-WOE-IV operator.

    Loads the (optionally labelled) training data, bins every feature
    column in-place with `fintech_cut`, and keeps the split points
    (`binning_split`) plus the bin-label -> range-string map (`woe_map`)
    for the WOE/IV computation done in the subclasses.
    """

    def __init__(self, train_conf: dict, label: bool = False, *args, **kwargs):
        """Parse config, load data, and run feature binning.

        Args:
            train_conf (dict): parsed training configuration.
            label (bool): whether this party holds the label column.
        """
        super().__init__(train_conf)
        self.train_conf = train_conf
        self.df = None   # training features; replaced by binned labels below
        self.val = None  # optional validation set used for transform
        self.label = label
        self.woe_map = {}        # {feature: {bin_label: "(left, right]"}}
        self.binning_split = {}  # {feature: [split points]}
        if self.interaction_params:
            self.save_model = self.interaction_params.get("save_model", False)
        else:
            self.save_model = False
        self.save_dir = Path(self.output.get("path"))
        self.transform_switch = False
        self._init_data()
        if self.save_model:
            self.save_model_name = self.output.get("model").get("name")
            # "y" and "id" are bookkeeping columns, not model inputs.
            self.export_conf = [{
                "class_name": "VerticalBinningWoeIv",
                "filename": self.save_model_name,
                "bins": self.train_params["binning"]["bins"],
                "input_schema": ','.join([_ for _ in self.df.columns if _ not in set(["y", "id"])]),
            }]
        self.feature_binning()

    def _init_data(self) -> None:
        """Load data: input data with id.

        Reads the first trainset entry (csv on the local engine); the first
        column is used as the index when `has_id` is set. When `has_label`
        is set, the first remaining column is renamed to "y" and split off
        into `self.y`. A valset, if configured, is loaded for transform.

        Returns: None
        """
        logger.info("Start reading data.")
        if self.input_trainset[0].get("has_id", True):
            # index_col = self.input_trainset[0].get("index_col", 'id')
            index_col = 0
        else:
            index_col = None
        if self.input_trainset[0]["type"] == "csv":
            file_path = str(
                Path(self.input_trainset[0]["path"], self.input_trainset[0]["name"]))
            if self.computing_engine == "local":
                self.df = pd.read_csv(file_path, index_col=index_col)
                logger.info("Reading data successfully.")
        if self.input_trainset[0].get("has_label", False):
            # self.y = self.df[label_name]
            # self.df = self.df.drop(label_name, axis=1)
            self.df.columns = ["y"] + list(self.df.columns[1:])
            self.y = self.df.iloc[:, 0]
            self.df = self.df.iloc[:, 1:]
        else:
            self.y = None
        # read val for transform
        if len(self.input_valset) > 0:
            self.transform_switch = True
            self.val = pd.read_csv(
                str(Path(self.input_valset[0]["path"], self.input_valset[0]["name"])), index_col=index_col)

    def feature_binning(self) -> None:
        """Parse and execute feature binning.

        Runs `fintech_cut` over every column of `self.df`, replacing raw
        values with bin labels, collecting split points into
        `binning_split` (also dumped to the configured JSON file) and the
        label->range map into `woe_map`. Re-attaches `self.y` afterwards.

        Returns: None
        """
        logger.info("Start binning")
        nan_l = self.input_trainset[0]["nan_list"]
        method = self.train_params["binning"]['method']
        bin_num = self.train_params["binning"]["bins"]
        time_start = time.time()
        tmp = pd.Series(self.df.columns).apply(
            lambda x: fintech_cut(self.df[x], bin_num, nan_l, method))

        def get_cut_result(result):
            # result is the (series, splits, map) triple from fintech_cut.
            self.df[result[0].name] = result[0]
            self.binning_split.update({result[0].name: result[1]})
            self.woe_map.update(result[2])
        tmp.apply(lambda x: get_cut_result(x))
        time_end = time.time()
        logger.info("Cost time of binning is: {}s".format(
            time_end - time_start))
        if not os.path.exists(self.save_dir):
            os.makedirs(self.save_dir)
        file_path = f'{self.save_dir}/{self.output["split_points"]["name"]}'
        with open(file_path, "w") as wf:
            json.dump(self.binning_split, wf)
        logger.info("Binning split points saved as {}.".format(file_path))
        if isinstance(self.y, pd.Series):
            self.df = pd.concat([self.y, self.df], axis=1)
| 8,207 | 35.642857 | 107 | py |
XFL | XFL-master/python/algorithm/framework/vertical/binning_woe_iv/label_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import math
import time
import numpy as np
import pandas as pd
from service.fed_control import ProgressCalculator
from common.checker.matcher import get_matched_config
from common.checker.x_types import All
from common.communication.gRPC.python.channel import BroadcastChannel
from common.crypto.paillier.paillier import Paillier, PaillierCiphertext
from common.utils.logger import logger
from .base import VerticalBinningWoeIvBase
class VerticalBinningWoeIvLabelTrainer(VerticalBinningWoeIvBase):
    """Label-holding party of the vertical binning-WOE-IV operator.

    Bins its own features via the base class, computes local WOE/IV, shares
    the (optionally Paillier-encrypted) label column with the trainers, and
    turns their returned per-bin positive counts into WOE/IV for every
    remote feature. All results are written to a single JSON output file.
    """

    def __init__(self, train_conf: dict, *args, **kwargs):
        """
        Args:
            train_conf: training parameters
            *args:
            **kwargs:
        """
        # The "sync" channel must be created before the base class runs:
        # the shared train_info is broadcast to the trainers first.
        self.sync_channel = BroadcastChannel(name="sync")
        self._sync_config(train_conf)
        super().__init__(train_conf, label=True, *args, **kwargs)
        self.neg_total_count, self.pos_total_count = 0, 0
        logger.info("node-1:successfully binning.")
        self.woe_dict_total = {}  # {feature: {bin: woe}} across all parties
        self.iv_dict_total = {}   # {feature: iv} across all parties
        self.neg_bin_count = {}
        self.pos_bin_count = {}
        self.neg_bin_ratio = {}
        self.pos_bin_ratio = {}
        self.broadcast_channel = BroadcastChannel(name="vertical_binning_woe_iv_channel")

    def _sync_config(self, config):
        """Broadcast the whole `train_info` section of the config to the trainers."""
        sync_rule = {
            "train_info": All()
        }
        config_to_sync = get_matched_config(config, sync_rule)
        self.sync_channel.broadcast(config_to_sync)

    def fit(self):
        """Compute WOE/IV for all parties and persist the merged result.

        Steps: compute local WOE/IV; share the label column — encrypted per
        bin with Paillier, or as a plain DataFrame; collect each trainer's
        per-bin positive-count feedback, decrypt it if needed, derive
        WOE/IV, and dump the accumulated dictionaries to the configured
        JSON file (rewritten after each trainer's contribution).
        """
        # broadcast_channel = BroadcastChannel(name="vertical_binning_woe_iv_channel")
        encryption_config_pre = self.train_params["encryption"]
        encryption_method = list(encryption_config_pre.keys())[0].lower()
        encryption_config = encryption_config_pre[encryption_method]
        # if encryption_method == "paillier":
        #     pri_context = Paillier.context(encryption_config["key_bit_size"], djn_on=encryption_config["djn_on"])
        #     self.broadcast_channel.broadcast(pri_context.to_public().serialize(), use_pickle=False)
        # elif encryption_method == "plain":
        #     pass
        # else:
        #     raise ValueError(
        #         f"Encryption method {encryption_method} not supported! Valid methods are 'paillier', 'plain'.")
        # logger.info("Start calculate host IV with WOE values.")
        self.label_trainer_woe_iv()
        if encryption_method == "paillier":
            # Only the public context is shared; the private key stays local.
            pri_context = Paillier.context(encryption_config["key_bit_size"], djn_on=encryption_config["djn_on"])
            self.broadcast_channel.broadcast(pri_context.to_public().serialize(), use_pickle=False)
            num_cores = -1 if encryption_config["parallelize_on"] else 1
            label = self.df[["y"]].to_numpy().flatten().astype(np.int32)
            logger.info(f"Encrypting label using {encryption_method} method.")
            en_label = Paillier.encrypt(pri_context,
                                        label,
                                        precision=encryption_config["precision"],
                                        obfuscation=True,
                                        num_cores=num_cores)
            logger.info("Encryption complete.")
            self.broadcast_channel.broadcast(Paillier.serialize(en_label), use_pickle=False)
        elif encryption_method == "plain":
            id_label_pair = self.df[["y"]]
            self.broadcast_channel.broadcast(id_label_pair, use_pickle=True)
        feedback_list = self.broadcast_channel.collect()
        assert len(self.broadcast_channel.remote_ids) == len(feedback_list)
        for uid, feedback in zip(self.broadcast_channel.remote_ids, feedback_list):
            client_woe_dict, client_iv_dict = {}, {}
            if encryption_method == "paillier":
                logger.info(f"Decrypting woe_feedback using {encryption_method} method.")
                for _id, feature in feedback["woe_feedback_list"].items():
                    c = feature.apply(lambda x: PaillierCiphertext.deserialize_from(pri_context, x))
                    feedback["woe_feedback_list"][_id] = c.apply(lambda x: Paillier.decrypt(pri_context,
                                                                                            x,
                                                                                            dtype='float',
                                                                                            num_cores=num_cores))
                logger.info("Decryption Complete.")
            # woe_feedback_list: per-feature summed labels per bin (= positive
            # counts); bins_count: per-feature total counts per bin.
            woe_feedback_list, bins_count = feedback["woe_feedback_list"], feedback["bins_count"]
            logger.info("Start calculate woe for trainer")
            time_s = time.time()
            for k, v in woe_feedback_list.items():
                # featName = "{}_{}".format(uid, k)
                client_woe_dict[k], client_iv_dict[k] = {}, 0
                neg_ = bins_count[k] - v
                pos_prob, neg_prob = (v / self.pos_total_count), (neg_ / self.neg_total_count)
                # Smooth empty bins so log()/division stay finite.
                pos_prob = pos_prob.apply(lambda x: 1e-7 if x == 0 else x)
                neg_prob = neg_prob.apply(lambda x: 1e-7 if x == 0 else x)
                woe_pre = pos_prob / neg_prob
                woe = woe_pre.apply(lambda x: float("%.6f" % math.log(x)))
                # woe.index = pd.Series(woe.index).apply(lambda x: int(x))
                # v.index = pd.Series(v.index).apply(lambda x: int(x))
                # neg_.index = pd.Series(neg_.index).apply(lambda x: int(x))
                self.pos_bin_count[k] = v.to_dict()
                self.neg_bin_count[k] = neg_.to_dict()
                pos_prob = pos_prob.apply(lambda x: float("%.6f" % x))
                neg_prob = neg_prob.apply(lambda x: float("%.6f" % x))
                self.pos_bin_ratio[k] = pos_prob.to_dict()
                self.neg_bin_ratio[k] = neg_prob.to_dict()
                client_woe_dict[k] = woe.to_dict()
                client_iv_dict[k] += float("%.6f" % np.sum((pos_prob - neg_prob) * woe))
            logger.info("Trainer woe cost:" + str(time.time() - time_s))
            logger.info("Calculate host IV with WOE values completed.")
            # logger.info("Host WOE dictionary: {}".format(client_woe_dict))
            # logger.info("Host IV dictionary: {}".format(client_iv_dict))
            # Save host dicts
            self.woe_dict_total.update(client_woe_dict)
            self.iv_dict_total.update(client_iv_dict)
            guest_file_path = f'{self.save_dir}/{self.output["iv"]["name"]}'
            with open(guest_file_path, "w") as wf:
                json.dump({"woe": self.woe_dict_total, "iv": self.iv_dict_total, "count_neg": self.neg_bin_count,
                           "count_pos": self.pos_bin_count, "ratio_pos": self.pos_bin_ratio,
                           "ratio_neg": self.neg_bin_ratio}, wf)
            logger.info("Host {} WOE & IV values saved as {}.".format(uid, guest_file_path))
        ProgressCalculator.finish_progress()

    def label_trainer_woe_iv(self):
        """Compute WOE/IV for the label holder's own (already binned) features.

        Assumes a binary label column "y" with values 0 and 1 (total counts
        are read from the groupby result at keys 0 and 1). Fills the
        per-bin count/ratio dictionaries and merges the results into the
        totals; bin labels are mapped back to range strings via `woe_map`.
        """
        logger.info("Start calculate Guest IV with WOE values.")
        woe_dict, iv_dict = {}, {}
        # count_neg_dict, count_pos_dict, bins_total, percentage, bad_rate = {}, {}, {}, {}, {}
        # good_percentage, bad_percentage = {}, {}
        # # count total label = 0, 1
        total_count = self.df.groupby("y")["y"].count()
        self.neg_total_count, self.pos_total_count = total_count[0], total_count[1]
        feat_woe = set(self.df.columns).difference(set("y"))
        logger.info("Start calculate woe for label trainer")
        time_s = time.time()
        for feature in feat_woe:
            woe_dict[feature], iv_dict[feature] = {}, 0
            feature_df = self.df[[feature, "y"]]
            # 'sum' = positives per bin (labels are 0/1), 'count' = bin size.
            tmp_count = feature_df.groupby([feature])['y'].agg({'count', 'sum'})
            neg_bin_count = tmp_count['count'] - tmp_count['sum']
            pos_prob, neg_prob = (tmp_count['sum'] / self.pos_total_count), (neg_bin_count / self.neg_total_count)
            # Smooth empty bins so log()/division stay finite.
            pos_prob = pos_prob.apply(lambda x: 1e-7 if x == 0 else x)
            neg_prob = neg_prob.apply(lambda x: 1e-7 if x == 0 else x)
            woe_pre = pos_prob / neg_prob
            woe = woe_pre.apply(lambda x: float("%.6f" % math.log(x)))
            iv_dict[feature] = float("%.6f" % np.sum((pos_prob - neg_prob) * woe))
            woe.index = pd.Series(woe.index).apply(lambda x: self.woe_map[feature][x])
            tmp_count['sum'].index = pd.Series(tmp_count['sum'].index).apply(lambda x: self.woe_map[feature][x])
            neg_bin_count.index = pd.Series(neg_bin_count.index).apply(lambda x: self.woe_map[feature][x])
            self.pos_bin_count[feature] = tmp_count['sum'].to_dict()
            self.neg_bin_count[feature] = neg_bin_count.to_dict()
            pos_prob = pos_prob.apply(lambda x: float("%.6f" % x))
            neg_prob = neg_prob.apply(lambda x: float("%.6f" % x))
            pos_prob.index = pd.Series(pos_prob.index).apply(lambda x: self.woe_map[feature][x])
            neg_prob.index = pd.Series(neg_prob.index).apply(lambda x: self.woe_map[feature][x])
            self.pos_bin_ratio[feature] = pos_prob.to_dict()
            self.neg_bin_ratio[feature] = neg_prob.to_dict()
            woe_dict[feature] = woe.to_dict()
        logger.info("label trainer cost:" + str(time.time() - time_s))
        logger.info("Calculate Guest IV with WOE values completed.")
        # # logger.info("Guest WOE dictionary: {}".format(woe_dict))
        # # logger.info("Guest IV dictionary: {}".format(iv_dict))
        # # Save guest dicts
        # save_dir = self.output["dataset"]["path"]
        # if not os.path.exists(save_dir): os.makedirs(save_dir)
        # guest_file_path = f'{save_dir}/{self.output["dataset"]["name"]}.json'
        # with open(guest_file_path, "a") as wf:
        #     json.dump({"woe": woe_dict, "iv": iv_dict, "count_neg": count_neg_dict, "count_pos": count_pos_dict,
        #                "bins_total": bins_total, "percentage": percentage, "bad_rate": bad_rate,
        #                "good_percentage": good_percentage, "bad_percentage": bad_percentage}, wf)
        self.woe_dict_total.update(woe_dict)
        self.iv_dict_total.update(iv_dict)
        # logger.info("Guest WOE & IV values saved as {}.".format(guest_file_path))
        logger.info("Guest WOE & IV values saved")
| 10,999 | 49.925926 | 115 | py |
XFL | XFL-master/python/algorithm/framework/vertical/binning_woe_iv/__init__.py | 0 | 0 | 0 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.