repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
XFL | XFL-master/python/algorithm/framework/vertical/binning_woe_iv/trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import time
import numpy as np
import pandas as pd
from pathos.multiprocessing import ProcessPool
from common.communication.gRPC.python.channel import BroadcastChannel
from common.crypto.paillier.paillier import Paillier
from common.utils.logger import logger
from common.utils.utils import update_dict
from ..pearson.base import VerticalPearsonBase
from .base import VerticalBinningWoeIvBase
class VerticalBinningWoeIvTrainer(VerticalBinningWoeIvBase):
    """Label-free (trainer) party of vertical WOE/IV binning.

    Receives the (possibly Paillier-encrypted) labels from the label party,
    aggregates per-bin counts and label sums for every local feature, and
    sends the results back over the broadcast channel.  Feature names are
    obfuscated with a string-encryption mapping before being exposed.
    """
    def __init__(self, train_conf: dict, *args, **kwargs):
        """Sync config from the label party, then init the binning base.

        Args:
            train_conf (dict): local training configuration; it is updated
                in place with the config received over the "sync" channel.
        """
        self.sync_channel = BroadcastChannel(name="sync")
        # The label party is the source of truth for shared train params.
        conf = self._sync_config()
        update_dict(train_conf, conf)
        super().__init__(train_conf, label=False, *args, **kwargs)
        # Filled during fit(): DataFrame with a single encrypted column 'y'.
        self.encrypt_id_label_pair = None
        # self.pool = ProcessPool(self.extra_config["poolNum"])
        # feature name -> per-bin sample counts / per-bin encrypted label sums
        self.bins_count = {}
        self.woe_feedback_list = {}
        self.broadcast_channel = BroadcastChannel(name="vertical_binning_woe_iv_channel")
        # Obfuscate column names before they leave this party; remap both the
        # dataframe columns and the woe_map keys so they stay consistent.
        self.feature_mapping = VerticalPearsonBase.string_encryption(self.df.columns)
        self.df.columns = list(pd.Series(self.df.columns).apply(lambda x: self.feature_mapping[x]))
        self.woe_map = dict(
            zip(pd.Series(self.woe_map.keys()).apply(lambda x: self.feature_mapping[x]), self.woe_map.values()))
        logger.info("node-2:successfully binning.")
    def _sync_config(self):
        """Receive the shared training config broadcast by the label party."""
        config = self.sync_channel.recv()
        return config
    def woe_pre(self, feat):
        """Group encrypted labels by one feature's bin values.

        Returns a DataFrame with 'count' and 'sum' of 'y' per bin value.
        """
        feature_df = self.encrypt_id_label_pair.join(self.df[feat])
        # NOTE: agg with a set — resulting column order is unordered, but
        # downstream access is by name ('count' / 'sum'), so that is safe.
        tmp = feature_df.groupby([feat])['y'].agg({'count', 'sum'})
        return tmp
    def fit(self):
        """Run the trainer side of the binning protocol.

        Paillier path: receive public key + encrypted labels, aggregate in
        parallel batches; plain path: receive labels and aggregate serially.
        Finally remap bin indexes via woe_map and send results back.
        """
        encryption_config_pre = self.train_params["encryption"]
        # The encryption method is the single top-level key of the config.
        encryption_method = list(encryption_config_pre.keys())[0].lower()
        #
        if encryption_method == "paillier":
            # id = self.df.index.tolist()
            pub_context = self.broadcast_channel.recv(use_pickle=False)
            pub_context = Paillier.context_from(pub_context)
            en_label = self.broadcast_channel.recv(use_pickle=False)
            en_label = Paillier.ciphertext_from(pub_context, en_label)
            # if len(list(id)) != len(en_label):
            #     raise IndexError(f"Table size not match. Local table size is {len(list(id))}, "
            #                      f"incoming table size is {len(en_label)}")
            # Align encrypted labels with the local feature rows by index.
            self.encrypt_id_label_pair = pd.DataFrame(en_label).rename(columns={0: 'y'})
            self.encrypt_id_label_pair.index = self.df.index
            logger.info("Start count bins for trainer")
            time_s = time.time()
            # tmp = self.pool.map(self.woe_pre, list(self.df.columns))
            def woe_pre_plus(batch_data):
                # Stack a batch of single-column frames into one long frame
                # (columns: y / col_value / col_name) and group once, which is
                # much cheaper than one groupby per feature.
                # NOTE: reads `batch_size` from the enclosing scope; it is
                # defined below but before this closure is ever called.
                logger.info("Start pool map")
                _tmp = pd.DataFrame(columns=['y', 'col_value', 'col_name'],
                                    index=range(len(batch_data[0]) * batch_size))
                for _id in range(len(batch_data)):
                    col = batch_data[_id].columns[0]
                    batch_data[_id]['col_name'] = col
                    batch_data[_id] = batch_data[_id].rename(columns={col: 'col_value'})
                    batch_data[_id] = self.encrypt_id_label_pair.join(batch_data[_id])
                    # Re-index each feature's rows into its own contiguous slot.
                    batch_data[_id].index = range(len(batch_data[0]) * _id, len(batch_data[0]) * (_id + 1))
                    _tmp.loc[len(batch_data[0]) * _id:len(batch_data[0]) * (_id + 1), :] = batch_data[_id]
                # tmp = pd.concat([tmp, feat])
                tmp_gp = _tmp.groupby(['col_name', 'col_value'])['y'].agg({'count', 'sum'})
                del _tmp, batch_data
                # Split the MultiIndex result back into per-feature dicts.
                bins_count = dict(zip(tmp_gp.index.levels[0], [tmp_gp.loc[ii]['count']
                                                               for ii in tmp_gp.index.levels[0]]))
                woe_feedback_list = dict(
                    zip(tmp_gp.index.levels[0], [tmp_gp.loc[ii]['sum'] for ii in tmp_gp.index.levels[0]]))
                logger.info("One pool ends")
                return bins_count, woe_feedback_list
            # Chunk the columns into batches of `batch_size` for the pool.
            data_batch = []
            col_name = list(self.df.columns)
            batch_size = 30
            div = int(np.ceil(len(col_name) / batch_size))
            for i in range(div):
                if i == div - 1:
                    # Last batch may be shorter than batch_size.
                    num_lst = list(range(i * batch_size, len(col_name)))
                    t = [pd.DataFrame(self.df[col_name[val]]) for val in num_lst]
                else:
                    num_lst = list(range(i * batch_size, (i + 1) * batch_size))
                    t = [pd.DataFrame(self.df[col_name[val]]) for val in num_lst]
                data_batch.append(t)
            # Free the raw frame before forking worker processes.
            del self.df
            # NOTE(review): dummy warm-up call whose result is discarded
            # ("improve coverage") — verify it cannot raise when the real
            # label index does not overlap the dummy 0..2 index.
            woe_pre_plus([pd.DataFrame([1, 2, 3])]) # improve coverage
            with ProcessPool(self.train_params["max_num_cores"]) as pool:
                tmp = pool.map(woe_pre_plus, data_batch)
            for i in tmp:
                self.bins_count.update(i[0])
                self.woe_feedback_list.update(i[1])
            logger.info("Trainer sum costs:" + str(time.time() - time_s))
        elif encryption_method == "plain":
            # Plain path: labels arrive unencrypted; aggregate serially.
            self.encrypt_id_label_pair = self.broadcast_channel.recv(use_pickle=True)
            self.encrypt_id_label_pair = pd.DataFrame(self.encrypt_id_label_pair)
            tmp = []
            logger.info("Start count bins for trainer")
            time_s = time.time()
            pd.Series(self.df.columns).apply(lambda x: tmp.append(self.woe_pre(x)))
            self.bins_count = dict(zip(self.df.columns, [i['count'] for i in tmp]))
            self.woe_feedback_list = dict(zip(self.df.columns, [i['sum'] for i in tmp]))
            logger.info("Trainer sum costs:" + str(time.time() - time_s))
        # else:
        #     raise ValueError(
        #         f"Encryption method {encryption_method} not supported! Valid methods are 'paillier', 'plain'.")
        # woe name map: translate raw bin values to woe_map bin labels.
        def woe_name_map(feat):
            self.woe_feedback_list[feat].index = pd.Series(self.woe_feedback_list[feat].index).apply(
                lambda x: self.woe_map[feat][x])
            self.bins_count[feat].index = pd.Series(self.bins_count[feat].index).apply(lambda x: self.woe_map[feat][x])
        pd.Series(self.woe_feedback_list.keys()).apply(lambda x: woe_name_map(x))
        if encryption_method == "paillier":
            # Paillier ciphertexts must be serialized before pickling/sending.
            for id_, feature in self.woe_feedback_list.items():
                self.woe_feedback_list[id_] = feature.apply(lambda x: x.serialize())
        send_msg = {"woe_feedback_list": self.woe_feedback_list, "bins_count": self.bins_count}
        self.broadcast_channel.send(send_msg)
        # save feature map (obfuscated-name -> original-name) alongside iv output
        feature_map = self.output.get("iv", None)
        if feature_map is not None:
            host_file_path = f'{self.save_dir}/{feature_map["name"]}'
            with open(host_file_path, "w") as wf:
                json.dump({"feature_mapping": self.feature_mapping}, wf)
| 7,790 | 46.797546 | 119 | py |
XFL | XFL-master/python/algorithm/framework/vertical/logistic_regression/base.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Optional
from pathlib import Path
from collections import OrderedDict
import pandas as pd
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset
from google.protobuf import json_format
from algorithm.framework.vertical.vertical_model_base import VerticalModelBase
from common.utils.model_io import ModelIO
from common.utils.logger import logger
from common.model.python.linear_model_pb2 import LinearModel
BLOCKCHAIN = False
class VerticalLogisticRegression(nn.Module):
    """One linear layer emitting a single raw logit per sample.

    Autograd is disabled on the layer because, in the vertical-LR protocol,
    weight updates are applied manually from (decrypted) gradients rather
    than through backpropagation.
    """

    def __init__(self, input_dim: int, bias: bool = False):
        super().__init__()
        layer = nn.Linear(input_dim, 1, bias=bias)
        layer.requires_grad_(False)
        self.linear = layer

    def forward(self, x):
        # (batch, input_dim) -> (batch, 1); sigmoid is applied by the caller.
        return self.linear(x)
class VerticalLogisticRegressionBase(VerticalModelBase):
    """Shared base for both parties of vertical logistic regression.

    Parses the training config, loads train/val CSVs into DataLoaders and
    provides model init plus protobuf load/dump helpers.
    """
    def __init__(self, train_conf: dict, label: bool = False, *args, **kwargs):
        """Parse config and build dataloaders.

        Args:
            train_conf (dict): full training configuration.
            label (bool, optional): True on the label-holding party; controls
                whether the 'y' column is expected. Defaults to False.
        """
        super().__init__(train_conf)
        self._parse_config()
        self.train_conf = train_conf
        self.model_conf = train_conf["model_info"].get("config")
        self.label = label
        self.schema = None
        self.data_dim = None
        self.model = None
        # NOTE(review): eval_dataloader is initialized here but the loaders
        # built below are assigned to self.val_dataloader — eval_dataloader
        # stays None; confirm nothing else reads it.
        self.train_dataloader, self.eval_dataloader = None, None
        self.loss_function = None
        self.metric_functions = {}
        self._init_dataloader()
    def _parse_config(self) -> None:
        """Pull frequently-used fields out of the parsed config sections."""
        super()._parse_config()
        self.model_name = self.model_info.get("name")
        self.save_model_name = self.output.get("model", {}).get("name", "")
        self.save_onnx_model_name = self.output.get("onnx_model", {}).get("name", "")
        self.evaluation_path = self.save_dir
        self.global_epoch = self.train_params.get("global_epoch")
        self.batch_size = self.train_params.get("batch_size")
        self.encryption_config = self.train_params.get("encryption")
        self.optimizer_config = self.train_params.get("optimizer")
        self.pretrain_model_path = self.input.get("pretrained_model", {}).get("path")
        self.pretrain_model_name = self.input.get("pretrained_model", {}).get("name")
        self.random_seed = self.train_params.get("random_seed")
        self.early_stopping_config = self.train_params.get("early_stopping")
        self.save_frequency = self.interaction_params.get("save_frequency")
        self.save_probabilities = self.interaction_params.get("save_probabilities")
    @staticmethod
    def set_seed(seed):
        # Seed both CPU and CUDA RNGs so all parties shuffle identically.
        torch.manual_seed(seed)
        torch.cuda.manual_seed(seed)
    def _init_model(self, bias: bool = False) -> None:
        """
        Init logistic regression model, optionally loading pretrained weights.

        Raises:
            NotImplementedError: if the pretrained file extension is not ".model".
        Returns: None
        """
        logger.info("Init model start.")
        self.model = VerticalLogisticRegression(input_dim=self.data_dim, bias=bias)
        # Load pretrained model if needed.
        if self.pretrain_model_path is not None and self.pretrain_model_path != "":
            if self.pretrain_model_name.split(".")[-1] == "model":
                model_dict = ModelIO.load_torch_model(os.path.join(self.pretrain_model_path, self.pretrain_model_name))
                self.model.load_state_dict(model_dict["state_dict"])
            # elif self.pretrain_model_name.split(".")[-1] == "pmodel":
            #     checkpoint = self.load_from_proto(os.path.join(self.pretrain_model_path, self.pretrain_model_name))
            #     self.model.load_state_dict(checkpoint)
            else:
                raise NotImplementedError(
                    "Pretrained model {} does not support.".format(self.pretrain_model_name)
                )
        logger.info("Init model completed.")
    def _init_dataloader(self) -> None:
        """
        Load train/val CSVs and build DataLoaders (with or without labels).

        Raises:
            FileNotFoundError: if any configured dataset file is missing.
            NotImplementedError: for non-csv dataset types.
            KeyError: on the label party when column 'y' is absent.
        Returns:
        """
        logger.info("Init validation dataloader start.")
        df_list = []
        # Check file exists.
        for ts in self.input_trainset:
            file_path = os.path.join(ts.get("path"), ts.get("name"))
            if not os.path.exists(file_path):
                raise FileNotFoundError("File {} cannot be found.".format(file_path))
            if ts.get("type") == "csv":
                if ts.get("has_id"):
                    # First column is the sample id; use it as the index.
                    df_list.append(pd.read_csv(file_path, index_col=0))
                else:
                    df_list.append(pd.read_csv(file_path))
            else:
                raise NotImplementedError(
                    "LDataset load method {} does not Implemented.".format(ts.get("type"))
                )
        node_train_df = pd.concat(df_list)
        df_list = []
        for vs in self.input_valset:
            file_path = os.path.join(vs.get("path"), vs.get("name"))
            if not os.path.exists(file_path):
                raise FileNotFoundError("File {} cannot be found.".format(file_path))
            if vs.get("type") == "csv":
                if vs.get("has_id"):
                    df_list.append(pd.read_csv(file_path, index_col=0))
                else:
                    df_list.append(pd.read_csv(file_path))
            else:
                raise NotImplementedError(
                    "Dataset load method {} does not Implemented.".format(vs.get("type"))
                )
        node_val_df = pd.concat(df_list)
        # Feature schema excludes the label and id columns.
        self.schema = ','.join([_ for _ in node_train_df.columns if _ not in set(["y", "id"])])
        # Object (string) indexes cannot feed torch.tensor ids below — reset.
        if node_train_df.index.dtype == 'O':
            node_train_df = node_train_df.reset_index(drop=True)
        if node_val_df.index.dtype == 'O':
            node_val_df = node_val_df.reset_index(drop=True)
        if self.label:
            # Check column y exists.
            if "y" not in node_train_df.columns:
                raise KeyError("Cannot found column y in train set.")
            if "y" not in node_val_df.columns:
                raise KeyError("Cannot found column y in val set.")
            node_train_id = node_train_df.index.to_list()
            node_train_label = node_train_df["y"].values  # .tolist()
            node_train_data = node_train_df.drop(labels=["y"], axis=1).values  # .tolist()
            assert len(node_train_label) == len(node_train_data)
            node_val_id = node_val_df.index.to_list()
            node_val_label = node_val_df["y"].values  # .tolist()
            node_val_data = node_val_df.drop(labels=["y"], axis=1).values  # .tolist()
            assert len(node_val_label) == len(node_val_data)
            # Each batch yields (features, label, id); ids keep rows aligned
            # across parties when writing predictions.
            self.train_dataloader = DataLoader(
                dataset=TensorDataset(torch.tensor(node_train_data, dtype=torch.float32),
                                      torch.unsqueeze(torch.tensor(node_train_label), dim=-1),
                                      torch.unsqueeze(torch.tensor(node_train_id), dim=-1)),
                batch_size=self.batch_size, shuffle=True
            )
            # NOTE(review): assumes 'y' is the first column of the val set —
            # confirm against the dataset layout.
            self.train_f_names = node_val_df.columns.tolist()[1:]
            self.val_dataloader = DataLoader(
                dataset=TensorDataset(torch.tensor(node_val_data, dtype=torch.float32),
                                      torch.unsqueeze(torch.tensor(node_val_label), dim=-1),
                                      torch.unsqueeze(torch.tensor(node_val_id), dim=-1)),
                batch_size=self.batch_size, shuffle=False
            )
            self.data_dim = torch.tensor(node_train_data).shape[-1]
            logger.info("Data shape: {}.".format(list(torch.tensor(node_train_data).shape)))
        else:
            # Trainer party: no labels, batches are (features, id).
            node_train_id = node_train_df.index.to_list()
            node_train_data = node_train_df.values.tolist()
            node_val_id = node_val_df.index.to_list()
            node_val_data = node_val_df.values.tolist()
            self.train_dataloader = DataLoader(
                dataset=TensorDataset(torch.tensor(node_train_data, dtype=torch.float32),
                                      torch.unsqueeze(torch.tensor(node_train_id), dim=-1)),
                batch_size=self.batch_size, shuffle=True
            )
            self.train_f_names = node_val_df.columns.tolist()
            self.val_dataloader = DataLoader(
                dataset=TensorDataset(torch.tensor(node_val_data, dtype=torch.float32),
                                      torch.unsqueeze(torch.tensor(node_val_id), dim=-1)),
                batch_size=self.batch_size, shuffle=False
            )
            self.data_dim = torch.tensor(node_train_data).shape[-1]
            logger.info("Data shape: {}.".format(list(torch.tensor(node_train_data).shape)))
        logger.info("Init dataloader completed.")
    # unused
    @staticmethod
    def load_from_proto(path: str):
        """Read a LinearModel protobuf file back into a torch state dict."""
        with open(path, 'rb') as f:
            b = f.read()
        lr = LinearModel()
        lr.ParseFromString(b)
        d = json_format.MessageToDict(lr,
                                      including_default_value_fields=True,
                                      preserving_proto_field_name=True)
        state_dict = OrderedDict()
        for k, v in d.items():
            state_dict[k] = torch.Tensor([v])
        return state_dict
    @staticmethod
    def dump_as_proto(save_dir: str,
                      model_name: str,
                      state_dict: OrderedDict,
                      epoch: int = None,
                      final: bool = False,
                      suggest_threshold: float = None
                      ):
        """Serialize a state dict to a LinearModel protobuf file.

        Non-final epoch checkpoints get "_epoch_{n}" inserted in the name.
        """
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        json_dict = dict()
        for k, v in state_dict.items():
            if isinstance(v, torch.Tensor):
                # Drop the "linear." prefix; proto fields are weight/bias.
                json_dict[k.replace("linear.", "")] = v.tolist()[0]
        model_info = {"state_dict": json_dict}
        if suggest_threshold:
            model_info["suggest_threshold"] = suggest_threshold
        model_name_list = model_name.split(".")
        name_prefix, name_postfix = ".".join(model_name_list[:-1]), model_name_list[-1]
        if not final and epoch:
            model_name = name_prefix + "_epoch_{}".format(epoch) + "." + name_postfix
        else:
            model_name = name_prefix + "." + name_postfix
        model_path = os.path.join(save_dir, model_name)
        lr = LinearModel()
        json_format.ParseDict(model_info, lr)
        with open(model_path, 'wb') as f:
            f.write(lr.SerializeToString())
        logger.info("model saved as: {}.".format(model_path))
        return
| 11,213 | 40.227941 | 119 | py |
XFL | XFL-master/python/algorithm/framework/vertical/logistic_regression/label_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import hashlib
import os
from pathlib import Path
import numpy as np
import pandas as pd
import tenseal as ts
import torch
from sklearn.metrics import confusion_matrix
import random
import pickle
from common.checker.matcher import get_matched_config
from common.checker.x_types import All
from common.communication.gRPC.python.channel import BroadcastChannel
from common.crypto.paillier.paillier import Paillier
from common.evaluation.metrics import ThresholdCutter
from common.utils.algo_utils import earlyStopping
from common.utils.logger import logger
from common.utils.model_io import ModelIO
from common.utils.utils import save_model_config
from service.fed_node import FedNode
from service.fed_control import ProgressCalculator
from .base import VerticalLogisticRegressionBase
from .base import BLOCKCHAIN
class VerticalLogisticRegressionLabelTrainer(VerticalLogisticRegressionBase):
    def __init__(self, train_conf: dict, *args, **kwargs):
        """Sync config to trainers, seed all parties identically, init model.

        Args:
            train_conf (dict): full training configuration for the label party.
        """
        self.sync_channel = BroadcastChannel(name="sync")
        # Push the shared training params to the trainer parties first.
        self._sync_config(train_conf)
        super().__init__(train_conf, label=True, *args, **kwargs)
        if self.random_seed is None:
            # Inclusive bounds; may be negative — presumably fine for
            # torch.manual_seed, verify on the target torch version.
            self.random_seed = random.randint(-(1 << 32), 1 << 32)
        # Broadcast the seed so every party shuffles batches the same way.
        self.sync_channel.broadcast(self.random_seed)
        if BLOCKCHAIN:
            logger.debug(
                f"Broadcast random seed, SHA256: {hashlib.sha256(pickle.dumps(self.random_seed)).hexdigest()}")
        self.set_seed(self.random_seed)
        self.progress_calculator = ProgressCalculator(self.global_epoch, len(self.train_dataloader))
        self._init_model(bias=True)
        # Metadata describing the exported model for downstream inference.
        self.export_conf = [{
            "class_name": "VerticalLogisticRegression",
            "identity": self.identity,
            "filename": self.save_onnx_model_name,
            "input_dim": self.data_dim,
            "bias": True,
            "version": "1.4.0",
            "input_schema": self.schema,
        }]
        self.es = earlyStopping(key=self.early_stopping_config["key"],
                                patience=self.early_stopping_config["patience"],
                                delta=self.early_stopping_config["delta"])
        # Best-so-far snapshots, filled during fit().
        self.best_model = None
        self.best_prediction_val = None
        self.best_prediction_train = None
    def _sync_config(self, config):
        """Broadcast the subset of config that must be shared by all parties.

        Only the fields matched by sync_rule (interaction params plus the
        core train params) are sent; everything else stays local.
        """
        sync_rule = {
            "train_info": {
                "interaction_params": All(),
                "train_params": {
                    "global_epoch": All(),
                    "batch_size": All(),
                    "encryption": All(),
                    "optimizer": All(),
                    "early_stopping": All(),
                    "random_seed": All()
                }
            }
        }
        config_to_sync = get_matched_config(config, sync_rule)
        self.sync_channel.broadcast(config_to_sync)
        if BLOCKCHAIN:
            logger.debug(
                f"Sync config, SHA256: {hashlib.sha256(pickle.dumps(config_to_sync)).hexdigest()}")
    def fit(self):
        """Run the label-party side of vertical LR training.

        Per batch: aggregate partial predictions from all trainers, compute
        the residual, send it (encrypted under ckks/paillier, or plain) to
        trainers, update local weights, then decrypt and scatter the
        trainers' masked gradients.  After each epoch a validation pass runs;
        early stopping and checkpointing follow.  The send/recv ordering here
        is the wire protocol — it must mirror the trainer side exactly.
        """
        self.check_data()
        logger.debug("Vertical logistic regression training start")
        broadcast_channel = BroadcastChannel(
            name="vertical_logistic_regression_channel")
        encryption_config = self.encryption_config
        # encryption_method = encryption_config["method"].lower()
        # The method name is the single top-level key of the encryption config.
        encryption_method = list(self.encryption_config.keys())[0].lower()
        private_context = None
        num_cores = -1
        pred_prob_list, y_list = [], []
        if encryption_method == "ckks":
            # Build the CKKS context; the secret key never leaves this party.
            private_context = ts.context(
                ts.SCHEME_TYPE.CKKS,
                poly_modulus_degree=encryption_config[encryption_method]["poly_modulus_degree"],
                coeff_mod_bit_sizes=encryption_config[encryption_method]["coeff_mod_bit_sizes"]
            )
            private_context.generate_galois_keys()
            private_context.generate_relin_keys()
            private_context.global_scale = 1 << encryption_config[
                encryption_method]["global_scale_bit_size"]
            serialized_public_context = private_context.serialize(
                save_public_key=True,
                save_secret_key=False,
                save_galois_keys=True,
                save_relin_keys=True
            )
            logger.debug("Broadcast ckks public keys.")
            broadcast_channel.broadcast(
                serialized_public_context, use_pickle=False)
            if BLOCKCHAIN:
                logger.debug(
                    f"SHA256: {hashlib.sha256(serialized_public_context).hexdigest()}")
            logger.debug("Broadcast completed.")
        elif encryption_method == "paillier":
            num_cores = - \
                1 if encryption_config[encryption_method]["parallelize_on"] else 1
            private_context = Paillier.context(encryption_config[encryption_method]["key_bit_size"],
                                               djn_on=encryption_config[encryption_method]["djn_on"])
            logger.debug("Broadcast paillier public keys.")
            serialized_public_context = private_context.to_public().serialize()
            broadcast_channel.broadcast(
                serialized_public_context, use_pickle=False)
            if BLOCKCHAIN:
                logger.debug(
                    f"SHA256: {hashlib.sha256(serialized_public_context).hexdigest()}")
            logger.debug("Broadcast completed.")
        elif encryption_method == "plain":
            pass
        else:
            raise ValueError(f"Encryption method {encryption_method} not supported! Valid methods are 'paillier', "
                             f"'ckks', 'plain'.")
        loss_func = torch.nn.BCELoss()
        for epoch in range(1, self.global_epoch + 1):
            # NOTE(review): training_cm is accumulated but never read after
            # the loop — confirm whether it was meant to be reported.
            training_cm = np.zeros((2, 2))
            training_pred_prob_list, training_y_list, training_metric = [], [], {}
            for batch_idx, (x_batch, y_batch, _) in enumerate(self.train_dataloader):
                x_batch = x_batch.to(self.device)
                y_batch = y_batch.to(self.device)
                # compute theta_scheduler * x_scheduler
                pred_label_trainer = self.model(x_batch)
                # collect predict result from trainers.
                pred_trainer_list = broadcast_channel.collect()
                logger.debug("Received predictions from trainers, length of collect list is {}."
                             .format(len(pred_trainer_list)))
                if BLOCKCHAIN:
                    logger.debug(
                        f"SHA256: {hashlib.sha256(pickle.dumps(pred_trainer_list)).hexdigest()}")
                # Add predictions.
                pred_total = torch.clone(pred_label_trainer)
                for pred_trainer in pred_trainer_list:
                    pred_total += pred_trainer
                pred_total = torch.sigmoid(pred_total)
                logger.debug("Aggregated predictions.")
                # Calculate gradients.
                pred_residual = y_batch - pred_total
                if encryption_method == "ckks":
                    enc_pred_residual = ts.ckks_vector(
                        private_context, pred_residual.numpy().flatten())
                    serialized_enc_pred_residual = enc_pred_residual.serialize()
                    broadcast_channel.broadcast(
                        serialized_enc_pred_residual, use_pickle=False)
                    if BLOCKCHAIN:
                        logger.debug(
                            f"Broadcast encrypted pred residual, SHA256: {hashlib.sha256(serialized_enc_pred_residual).hexdigest()}")
                elif encryption_method == "paillier":
                    enc_pred_residual = Paillier.encrypt(private_context,
                                                         pred_residual.numpy().astype(np.float32).flatten(),
                                                         precision=encryption_config[encryption_method]["precision"],
                                                         obfuscation=True,
                                                         num_cores=num_cores)
                    serialized_enc_pred_residual = Paillier.serialize(
                        enc_pred_residual)
                    broadcast_channel.broadcast(
                        serialized_enc_pred_residual, use_pickle=False)
                    if BLOCKCHAIN:
                        logger.debug(
                            f"Broadcast encrypted pred residual, SHA256: {hashlib.sha256(serialized_enc_pred_residual).hexdigest()}")
                elif encryption_method == "plain":
                    broadcast_channel.broadcast(pred_residual)
                    if BLOCKCHAIN:
                        logger.debug(
                            f"Broadcast pred residual, SHA256: {hashlib.sha256(pickle.dumps(pred_residual)).hexdigest()}")
                training_pred_prob_list += torch.squeeze(
                    pred_total, dim=-1).tolist()
                training_y_list += torch.squeeze(y_batch, dim=-1).tolist()
                if self.echo_training_metrics:
                    pred_total = (pred_total > 0.5).float()
                    training_cm += confusion_matrix(
                        y_true=y_batch.detach().numpy(), y_pred=pred_total.detach().numpy())
                # Gradients for label trainer.
                # p selects the regularizer: 1 -> L1 (sign term), 2 -> L2,
                # 0 -> no regularization.
                logger.debug("Calculate gradients for label trainer.")
                if self.optimizer_config['p'] == 1:
                    # NOTE(review): abs(w)/w is the sign of w — division by a
                    # zero weight yields nan/inf; confirm weights can't be 0.
                    gradient_label_trainer_linear = -torch.mm(pred_residual.t(), x_batch) / x_batch.shape[0] + (
                        self.optimizer_config['alpha'] * (torch.abs(self.model.linear.weight)
                                                          / self.model.linear.weight)
                    ) / x_batch.shape[0]
                elif self.optimizer_config['p'] == 2:
                    gradient_label_trainer_linear = -torch.mm(pred_residual.t(), x_batch) / x_batch.shape[0] + (
                        2 * self.optimizer_config['alpha'] * self.model.linear.weight) / x_batch.shape[0]
                elif self.optimizer_config['p'] == 0:
                    gradient_label_trainer_linear = - \
                        torch.mm(pred_residual.t(), x_batch) / x_batch.shape[0]
                else:
                    raise NotImplementedError(
                        "Regular P={} not implement.".format(self.optimizer_config['p']))
                gradient_label_trainer_bias = -torch.mean(pred_residual, dim=0)
                gradient_label_trainer_linear = gradient_label_trainer_linear.t()
                # Collect trainers noise gradients, decrypt and broadcast.
                if encryption_method == "ckks":
                    gradient_list_trainer_linear = broadcast_channel.collect(
                        use_pickle=False)
                    if BLOCKCHAIN:
                        logger.debug(
                            f"Collect gradient list, SHA256: {hashlib.sha256(pickle.dumps(gradient_list_trainer_linear)).hexdigest()}")
                    gradient_list_trainer_linear = [ts.ckks_vector_from(private_context, i).decrypt() for i in
                                                    gradient_list_trainer_linear]
                    # Scatter: each trainer receives only its own decrypted
                    # (still noise-masked) gradient.
                    broadcast_channel.scatter(gradient_list_trainer_linear)
                    if BLOCKCHAIN:
                        logger.debug(
                            f"Scatter gradient, SHA256: {hashlib.sha256(pickle.dumps(gradient_list_trainer_linear)).hexdigest()}")
                elif encryption_method == "paillier":
                    gradient_list_trainer_linear = broadcast_channel.collect(
                        use_pickle=False)
                    if BLOCKCHAIN:
                        logger.debug(
                            f"Collect random seed, SHA256: {hashlib.sha256(pickle.dumps(gradient_list_trainer_linear)).hexdigest()}")
                    gradient_list_trainer_linear = [
                        Paillier.decrypt(private_context, Paillier.ciphertext_from(None, c), dtype='float',
                                         num_cores=num_cores) for c in gradient_list_trainer_linear]
                    broadcast_channel.scatter(gradient_list_trainer_linear)
                    if BLOCKCHAIN:
                        logger.debug(
                            f"Scatter gradient, SHA256: {hashlib.sha256(pickle.dumps(gradient_list_trainer_linear)).hexdigest()}")
                elif encryption_method == "plain":
                    pass
                # Manual SGD step (autograd is disabled on the model).
                self.model.linear.weight -= (gradient_label_trainer_linear *
                                             self.optimizer_config["lr"]).t()
                self.model.linear.bias -= (gradient_label_trainer_bias *
                                           self.optimizer_config["lr"]).t()
                logger.debug("Weights update completed.")
                # calculate and update the progress of the training
                self.progress_calculator.cal_custom_progress(epoch, batch_idx+1)
            train_loss = loss_func(
                torch.tensor(training_pred_prob_list, dtype=torch.float32),
                torch.tensor(training_y_list, dtype=torch.float32)
            ).detach().item()
            self._calc_metrics(np.array(training_y_list, dtype=float), np.array(
                training_pred_prob_list), epoch)
            # Validation step should be added here.
            cm = np.zeros((2, 2))
            pred_prob_list, y_list = [], []
            for batch_idx, (x_batch, y_batch, _) in enumerate(self.val_dataloader):
                x_batch = x_batch.to(self.device)
                y_batch = y_batch.to(self.device)
                pred_label_trainer = self.model(x_batch)
                pred_trainer_list = broadcast_channel.collect()
                if BLOCKCHAIN:
                    logger.debug(
                        f"Collect pred, SHA256: {hashlib.sha256(pickle.dumps(pred_trainer_list)).hexdigest()}")
                # Add predictions.
                pred_total = torch.clone(pred_label_trainer)
                for pred_trainer in pred_trainer_list:
                    pred_total += pred_trainer
                pred_total = torch.sigmoid(pred_total)
                pred_prob_list += torch.squeeze(pred_total, dim=-1).tolist()
                y_list += torch.squeeze(y_batch, dim=-1).tolist()
                pred_total = (pred_total > 0.5).float()
                cm += confusion_matrix(y_true=y_batch.detach().numpy(),
                                       y_pred=pred_total.detach().numpy())
            metric = self._calc_metrics(np.array(y_list, dtype=float), np.array(pred_prob_list),
                                        epoch, stage="val")
            val_loss = loss_func(
                torch.tensor(pred_prob_list, dtype=torch.float32),
                torch.tensor(y_list, dtype=torch.float32)
            ).detach().item()
            try:
                # loss_file = self.train_conf['output']['plot_loss']['name']
                logger.info(f"Writing loss for epoch {epoch}")
                self._write_loss(train_loss, val_loss, epoch)
            except Exception:
                # Best-effort: loss plotting must never abort training.
                pass
            if self.early_stopping_config["patience"] > 0:
                early_stop_flag, save_flag = self.es(metric)
            else:
                # Early stopping disabled: never stop, always snapshot.
                early_stop_flag, save_flag = False, True
            if save_flag:
                self.best_model = copy.deepcopy(self.model)
                self.best_prediction_train = copy.deepcopy(
                    training_pred_prob_list)
                self.best_prediction_val = copy.deepcopy(
                    np.array(pred_prob_list))
            # Trainers must learn the early-stop decision to stay in lockstep.
            early_stop = [early_stop_flag, save_flag,
                          self.early_stopping_config["patience"]]
            broadcast_channel.broadcast(early_stop,
                                        use_pickle=True)
            if BLOCKCHAIN:
                logger.debug(
                    f"Broadcast early stop flag, SHA256: {hashlib.sha256(pickle.dumps(early_stop))}")
            if early_stop_flag:
                break
            # self.dump_as_proto(save_dir=self.save_dir, model_name=self.save_model_name,
            #                    state_dict=self.best_model.state_dict(), final=True)
            # # if self.save_probabilities:
            # self._save_prob(best_model=self.best_model, channel=broadcast_channel)
            # return None
            # Periodic checkpoint every save_frequency epochs.
            if self.save_frequency > 0 and epoch % self.save_frequency == 0:
                if self.save_model_name.split(".")[-1] == "pmodel":
                    self.dump_as_proto(
                        save_dir=self.save_dir,
                        model_name=self.save_model_name,
                        state_dict=self.model.state_dict(),
                        epoch=epoch,
                    )
                else:
                    ModelIO.save_torch_model(
                        state_dict=self.model.state_dict(),
                        save_dir=self.save_dir,
                        model_name=self.save_model_name,
                        epoch=epoch,
                    )
                if self.save_onnx_model_name:
                    ModelIO.save_torch_onnx(
                        model=self.model,
                        input_dim=(self.data_dim,),
                        save_dir=self.save_dir,
                        model_name=self.save_onnx_model_name,
                        epoch=epoch,
                    )
        if self.early_stopping_config["patience"] <= 0:
            # No early stopping: the final epoch's model is the "best".
            self.best_model = copy.deepcopy(self.model)
            self.best_prediction_train = copy.deepcopy(training_pred_prob_list)
            self.best_prediction_val = copy.deepcopy(np.array(pred_prob_list))
        self.save(y_list, training_y_list)
        # if self.save_probabilities:
        self._save_prob(best_model=self.best_model, channel=broadcast_channel)
        self._save_feature_importance(broadcast_channel)
        # prepare data for writing
        train_label = np.array(training_y_list, dtype=float)
        train_y_pred = np.array(training_pred_prob_list, dtype=float)
        val_label = np.array(y_list, dtype=float)
        val_y_pred = np.array(pred_prob_list, dtype=float)
        # write roc data
        logger.info("Writing roc data...")
        self._write_roc_data(train_label, train_y_pred,
                             val_label, val_y_pred)
        # write ks data
        logger.info("Writing ks data...")
        self._write_ks_data(train_label, train_y_pred,
                            val_label, val_y_pred)
        # write lift and gain
        logger.info("Writing lift and gain data...")
        self._write_lift_gain_data(
            train_label, train_y_pred, val_label, val_y_pred)
        # write pr curve
        logger.info("Writing pr curve data")
        self._write_pr_data(
            train_label, train_y_pred, val_label, val_y_pred)
        # write feature importance
        logger.info("Writing feature importance data")
        logger.info("Self importances: {}".format(self.feature_importances_))
        self._write_feature_importance()
        ProgressCalculator.finish_progress()
    def save(self, y_list, training_y_list=None):
        """Persist the stage config and the best model (proto/torch + onnx).

        Args:
            y_list: validation labels (currently only used by the
                commented-out KS threshold computation).
            training_y_list: training labels, same situation.
        """
        save_model_config(stage_model_config=self.export_conf,
                          save_path=Path(self.save_dir))
        if not os.path.exists(self.evaluation_path):
            os.makedirs(self.evaluation_path)
        # dump out ks plot
        # NOTE(review): the cut_by_value/save calls below are commented out,
        # so suggest_threshold is always 0.5 and no KS plot is written; the
        # training branch also reuses the "ks_plot_val" output name — confirm
        # whether this is intentional.
        suggest_threshold = 0.5
        if "ks" in self.metric_config or "auc_ks" in self.metric_config:
            tc = ThresholdCutter(os.path.join(
                self.save_dir, self.output.get("ks_plot_val")["name"]))
            # tc.cut_by_value(np.array(y_list, dtype=float),
            #                 self.best_prediction_val)
            # suggest_threshold = tc.bst_threshold
            # tc.save()
            if self.interaction_params.get("echo_training_metrics"):
                tc = ThresholdCutter(os.path.join(
                    self.save_dir, self.output.get("ks_plot_val")["name"]))
                # tc.cut_by_value(
                #     np.array(training_y_list, dtype=float), self.best_prediction_train)
                # tc.save()
        if self.save_model_name:
            if self.save_model_name.split(".")[-1] == "pmodel":
                # Protobuf export path.
                self.dump_as_proto(
                    save_dir=self.save_dir,
                    model_name=self.save_model_name,
                    state_dict=self.best_model.state_dict(),
                    final=True,
                    suggest_threshold=suggest_threshold
                )
            else:
                ModelIO.save_torch_model(
                    state_dict=self.best_model.state_dict(),
                    save_dir=self.save_dir,
                    model_name=self.save_model_name,
                    meta_dict={"suggest_threshold": suggest_threshold}
                )
        if self.save_onnx_model_name:
            ModelIO.save_torch_onnx(
                model=self.best_model,
                input_dim=(self.data_dim,),
                save_dir=self.save_dir,
                model_name=self.save_onnx_model_name,
            )
    def _save_feature_importance(self, channel):
        """Collect per-feature weights from all parties and write them as CSV.

        Importance is the (signed) linear weight; rows are sorted by absolute
        value, descending.  Also populates self.feature_importances_ keyed by
        (owner_id, feature_name).
        """
        res = {"owner_id": [], "fid": [], "importance": []}
        # Each trainer sends (owner_id, weights, feature_names).
        other_weight_list = channel.collect()
        if BLOCKCHAIN:
            logger.debug(
                f"Collect weight list, SHA256: {hashlib.sha256(pickle.dumps(other_weight_list)).hexdigest()}")
        for (owner_id, weights, f_names) in other_weight_list:
            for fid, weight in enumerate(weights):
                res["owner_id"].append(owner_id)
                # res["fid"].append(fid)
                res["fid"].append(f_names[fid])
                res["importance"].append(float(weight))
        # Append this (label) party's own weights.
        for fid, weight in enumerate(self.best_model.state_dict()["linear.weight"][0]):
            # res["owner_id"].append(FedNode.node_id)
            res["owner_id"].append(FedNode.node_name)
            # res["fid"].append(fid)
            f_name = self.train_f_names[fid]
            res["fid"].append(f_name)
            res["importance"].append(float(weight))
        res = pd.DataFrame(res).sort_values(
            by="importance", key=lambda col: np.abs(col), ascending=False)
        res.to_csv(
            # Path(self.save_dir, "feature_importances.csv"), header=True, index=False, float_format="%.6g"
            Path(self.save_dir, self.output["feature_importance"]["name"]), header=True, index=False,
            float_format="%.6g"
        )
        # prepare feature_importances_ attribute
        feature_importances_ = {}
        for _, row in res.iterrows():
            feature_importances_[(row['owner_id'], row['fid'])] = row['importance']
        self.feature_importances_ = feature_importances_
    def _save_prob(self, best_model, channel):
        """Write final prediction probabilities for train/validation data.

        For each batch the local partial prediction is summed with the partial
        predictions collected from the other trainers over ``channel``, the sum
        is passed through a sigmoid, and ids/labels/probabilities are written
        via ``_write_prediction``. Each split is only written when the matching
        ``interaction_params`` flag is set.

        Args:
            best_model: the model snapshot to run inference with.
            channel: broadcast channel supplying the other trainers' partial
                predictions (one ``collect()`` per batch).
        """
        if self.interaction_params.get("write_training_prediction"):
            train_prob_list, train_label_list, train_id_list = [], [], []
            for batch_idx, (x_batch, y_batch, id_batch) in enumerate(self.train_dataloader):
                x_batch, y_batch, id_batch = x_batch.to(self.device), y_batch.to(
                    self.device), id_batch.to(self.device)
                pred_label_trainer = best_model(x_batch)
                pred_trainer_list = channel.collect()
                if BLOCKCHAIN:
                    logger.debug(
                        f"Collect pred list, SHA256: {hashlib.sha256(pickle.dumps(pred_trainer_list)).hexdigest()}")
                # Full logit = local partial prediction + all remote partials.
                pred_total = torch.clone(pred_label_trainer)
                for pred_trainer in pred_trainer_list:
                    pred_total += pred_trainer
                pred_total = torch.sigmoid(pred_total)
                train_id_list += torch.squeeze(id_batch, dim=-1).tolist()
                train_label_list += torch.squeeze(y_batch, dim=-1).tolist()
                train_prob_list += torch.squeeze(pred_total, dim=-1).tolist()
            self._write_prediction(
                train_label_list, train_prob_list, train_id_list, final=True)
        if self.interaction_params.get("write_validation_prediction"):
            val_prob_list, val_label_list, val_id_list = [], [], []
            for batch_idx, (x_batch, y_batch, id_batch) in enumerate(self.val_dataloader):
                x_batch, y_batch, id_batch = x_batch.to(self.device), y_batch.to(
                    self.device), id_batch.to(self.device)
                pred_label_trainer = best_model(x_batch)
                pred_trainer_list = channel.collect()
                if BLOCKCHAIN:
                    logger.debug(
                        f"Collect pred list, SHA256: {hashlib.sha256(pickle.dumps(pred_trainer_list)).hexdigest()}")
                pred_total = torch.clone(pred_label_trainer)
                for pred_trainer in pred_trainer_list:
                    pred_total += pred_trainer
                pred_total = torch.sigmoid(pred_total)
                val_id_list += torch.squeeze(id_batch, dim=-1).tolist()
                val_label_list += torch.squeeze(y_batch, dim=-1).tolist()
                val_prob_list += torch.squeeze(pred_total, dim=-1).tolist()
            self._write_prediction(
                val_label_list, val_prob_list, val_id_list, stage="val", final=True)
def check_data(self):
dim_channel = BroadcastChannel(name="check_data_com")
n = self.data_dim
dims = dim_channel.collect()
if BLOCKCHAIN:
logger.debug(
f"Collect dim, SHA256: {hashlib.sha256(pickle.dumps(dims)).hexdigest()}")
for dim in dims:
n += dim
if n <= 0:
raise ValueError("Number of the feature is zero. Stop training.")
| 26,570 | 47.13587 | 135 | py |
XFL | XFL-master/python/algorithm/framework/vertical/logistic_regression/__init__.py | 0 | 0 | 0 | py | |
XFL | XFL-master/python/algorithm/framework/vertical/logistic_regression/trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import random
import hashlib
import pickle
import secrets
from pathlib import Path
import numpy as np
import tenseal as ts
import torch
from common.communication.gRPC.python.channel import BroadcastChannel
from common.crypto.paillier.paillier import Paillier
from common.utils.logger import logger
from common.utils.utils import update_dict
from service.fed_node import FedNode
from common.utils.model_io import ModelIO
from common.utils.utils import save_model_config
from .base import VerticalLogisticRegressionBase
from .base import BLOCKCHAIN
class VerticalLogisticRegressionTrainer(VerticalLogisticRegressionBase):
    def __init__(self, train_conf: dict, *args, **kwargs):
        """Build the feature-only (no label) side of vertical logistic regression.

        Args:
            train_conf (dict): local training configuration; it is first merged
                with the configuration broadcast by the label trainer so both
                sides train under identical settings.
        """
        self.sync_channel = BroadcastChannel(name="sync")
        conf = self._sync_config()
        update_dict(train_conf, conf)
        super().__init__(train_conf, label=False, *args, **kwargs)
        self._init_model()
        # Metadata describing the exported model artifact.
        self.export_conf = [{
            "class_name": "VerticalLogisticRegression",
            "identity": self.identity,
            "filename": self.save_onnx_model_name,
            "input_dim": self.data_dim,
            "bias": False,
            "version": "1.4.0",
            "input_schema": self.schema,
        }]
        # When no seed is configured locally, adopt the label trainer's seed so
        # all parties shuffle/initialize identically.
        if self.random_seed is None:
            self.random_seed = self.sync_channel.recv()
            if BLOCKCHAIN:
                logger.debug(f"Sync random seed, SHA256: {hashlib.sha256(pickle.dumps(self.random_seed)).hexdigest()}")
        self.set_seed(self.random_seed)
        self.best_model = None
def _sync_config(self):
config = self.sync_channel.recv()
if BLOCKCHAIN:
logger.debug(f"Sync config, SHA256: {hashlib.sha256(pickle.dumps(config)).hexdigest()}")
return config
    def fit(self):
        """ train model
        Model parameters need to be updated before fitting.

        Trainer-side loop of vertical logistic regression: for every batch the
        trainer sends its partial prediction (theta_trainer * x_trainer) to the
        label trainer, receives the (possibly homomorphically encrypted)
        residual back, computes its masked local gradient under the configured
        encryption scheme and updates the linear weights. After the epoch loop
        the best model snapshot and its ONNX export are persisted.
        """
        self.check_data()
        patient = -1
        # encryption_config = self.encryption_config
        # encryption_method = encryption_config["method"].lower()
        # The encryption config dict holds exactly one scheme name as its key.
        encryption_method = list(self.encryption_config.keys())[0].lower()
        logger.info("Vertical logistic regression training start")
        broadcast_channel = BroadcastChannel(name="vertical_logistic_regression_channel")
        public_context = None
        # Fetch the public key matching the configured homomorphic scheme.
        if encryption_method == "ckks":
            logger.debug("Receive ckks public key.")
            public_context = broadcast_channel.recv(use_pickle=False)
            if BLOCKCHAIN:
                logger.debug(f"SHA256: {hashlib.sha256(public_context).hexdigest()}")
            public_context = ts.context_from(public_context)
            logger.debug("Public key received.")
        elif encryption_method == "paillier":
            logger.debug("Receive paillier public key.")
            public_context = broadcast_channel.recv(use_pickle=False)
            if BLOCKCHAIN:
                logger.debug(f"SHA256: {hashlib.sha256(public_context).hexdigest()}")
            public_context = Paillier.context_from(public_context)
            logger.debug("Public key received.")
        elif encryption_method == "plain":
            pass
        else:
            raise ValueError(
                f"Encryption method {encryption_method} not supported! Valid methods are 'paillier', 'ckks', 'plain'.")
        # Cryptographically strong RNG for the gradient-masking noise below.
        rng = secrets.SystemRandom()
        for epoch in range(1, self.global_epoch + 1):
            for batch_idx, (x_batch) in enumerate(self.train_dataloader):
                x_batch = x_batch[0].to(self.device)
                # compute theta_trainer * x_trainer
                pred_trainer = self.model(x_batch)
                # send predict result to label trainer.
                logger.debug("Send predict result to label trainer.")
                broadcast_channel.send(pred_trainer)
                if BLOCKCHAIN:
                    logger.debug(f"Broadcast pred, SHA256: {hashlib.sha256(pickle.dumps(pred_trainer)).hexdigest()}")
                # Receive the residual, deserialized with the scheme's context.
                if encryption_method == "ckks":
                    pred_residual = broadcast_channel.recv(use_pickle=False)
                    if BLOCKCHAIN:
                        logger.debug(f"SHA256: {hashlib.sha256(pred_residual).hexdigest()}")
                    pred_residual = ts.ckks_vector_from(public_context, pred_residual)
                elif encryption_method == "paillier":
                    pred_residual = broadcast_channel.recv(use_pickle=False)
                    if BLOCKCHAIN:
                        logger.debug(f"SHA256: {hashlib.sha256(pred_residual).hexdigest()}")
                    pred_residual = Paillier.ciphertext_from(public_context, pred_residual)
                elif encryption_method == "plain":
                    pred_residual = broadcast_channel.recv()
                    if BLOCKCHAIN:
                        logger.debug(f"SHA256: {hashlib.sha256(pickle.dumps(pred_residual)).hexdigest()}")
                logger.debug("Received prediction residual from label trainer.")
                # Compute gradients for trainer.
                logger.debug("Calculate gradients for trainer.")
                if encryption_method == "ckks":
                    # Add noise so the label trainer cannot read the raw
                    # gradient when it decrypts on our behalf.
                    noise = np.array([rng.randint(1 << 24, 1 << 26) - (1 << 25) for _ in range(x_batch.shape[1])],
                                     dtype=np.float32)
                    noise /= 100000
                    x_batch_numpy = x_batch.numpy()
                    # avoid bug in seal ckks when a column is all zero
                    sign = 1 if random.randint(0, 1) == 1 else -1
                    x_batch_numpy[np.where(np.sum(x_batch_numpy, axis=0) == 0)] = 1e-7 * sign
                    ciphertext = pred_residual.matmul(x_batch_numpy)
                    noised_gradient_trainer_linear = ciphertext + noise
                    # Send to label trainer (it decrypts and echoes back).
                    serialized_gradient = noised_gradient_trainer_linear.serialize()
                    broadcast_channel.send(serialized_gradient, use_pickle=False)
                    if BLOCKCHAIN:
                        logger.debug(f"Send gradient, SHA256: {hashlib.sha256(serialized_gradient).hexdigest()}")
                    gradient_trainer_linear = broadcast_channel.recv()
                    if BLOCKCHAIN:
                        logger.debug(f"Recv gradient, SHA256: {hashlib.sha256(pickle.dumps(gradient_trainer_linear)).hexdigest()}")
                    # Strip the mask, average over the batch and restore shape.
                    gradient_trainer_linear = np.array(gradient_trainer_linear, dtype=np.float32)
                    gradient_trainer_linear -= noise
                    gradient_trainer_linear = - gradient_trainer_linear / x_batch.shape[0]
                    gradient_trainer_linear = torch.FloatTensor(gradient_trainer_linear).unsqueeze(-1)
                elif encryption_method == "paillier":
                    noise = np.array([rng.randint(1 << 24, 1 << 26) - (1 << 25) for _ in range(x_batch.shape[1])],
                                     dtype=np.float32)
                    noise /= 100000
                    # Add noise
                    ciphertext = np.matmul(pred_residual, x_batch.numpy())
                    noised_gradient_trainer_linear = ciphertext + noise
                    # Send to label trainer
                    serialized_gradient = Paillier.serialize(noised_gradient_trainer_linear)
                    broadcast_channel.send(serialized_gradient, use_pickle=False)
                    if BLOCKCHAIN:
                        logger.debug(f"Send gradient, SHA256: {hashlib.sha256(serialized_gradient).hexdigest()}")
                    gradient_trainer_linear = broadcast_channel.recv()
                    if BLOCKCHAIN:
                        logger.debug(f"Recv gradient, SHA256: {hashlib.sha256(pickle.dumps(gradient_trainer_linear)).hexdigest()}")
                    gradient_trainer_linear = np.array(gradient_trainer_linear, dtype=np.float32)
                    gradient_trainer_linear -= noise
                    gradient_trainer_linear = - gradient_trainer_linear / x_batch.shape[0]
                    gradient_trainer_linear = torch.FloatTensor(gradient_trainer_linear).unsqueeze(-1)
                elif encryption_method == "plain":
                    gradient_trainer_linear = -torch.mm(pred_residual.t(), x_batch) / x_batch.shape[0]
                    gradient_trainer_linear = gradient_trainer_linear.t()
                # Regular section
                # Add the L1/L2/no-penalty term to the gradient, selected by
                # optimizer_config['p'].
                gradient_trainer_linear = gradient_trainer_linear.t()
                if self.optimizer_config['p'] == 1:
                    gradient_trainer_linear += (self.optimizer_config['alpha'] * (
                        torch.abs(self.model.linear.weight) / self.model.linear.weight)) / x_batch.shape[0]
                elif self.optimizer_config['p'] == 2:
                    gradient_trainer_linear += (2 * self.optimizer_config['alpha'] * self.model.linear.weight) / \
                        x_batch.shape[0]
                elif self.optimizer_config['p'] == 0:
                    gradient_trainer_linear += 0
                else:
                    raise NotImplementedError("Regular P={} not implement.".format(self.optimizer_config['p']))
                gradient_trainer_linear = gradient_trainer_linear.t()
                # Plain SGD step on the linear weights.
                self.model.linear.weight -= (gradient_trainer_linear * self.optimizer_config["lr"]).t()
                logger.debug("Weights update completed.")
            # Validation: only send partial predictions; metrics are computed
            # by the label trainer, which replies with the control flags below.
            for batch_idx, (x_batch) in enumerate(self.val_dataloader):
                x_batch = x_batch[0].to(self.device)
                pred_trainer = self.model(x_batch)
                broadcast_channel.send(pred_trainer)
                if BLOCKCHAIN:
                    logger.debug(f"Send pred, batch_idx {batch_idx}, SHA256: {hashlib.sha256(pickle.dumps(pred_trainer)).hexdigest()}")
            early_stop_flag, save_flag, patient = broadcast_channel.recv()
            if BLOCKCHAIN:
                logger.debug(f"Recv early stop flag, SHA256: {hashlib.sha256(pickle.dumps([early_stop_flag, save_flag, patient])).hexdigest()}")
            if save_flag:
                self.best_model = copy.deepcopy(self.model)
            if early_stop_flag:
                break
            # self.dump_as_proto(save_dir=self.save_dir, model_name=self.save_model_name,
            #                    state_dict=self.best_model.state_dict(), final=True)
            # # if self.save_probabilities:
            # self._save_prob(best_model=self.best_model, channel=broadcast_channel)
            # return None
            # Periodic checkpoint (proto, torch or ONNX depending on config).
            if self.save_frequency > 0 and epoch % self.save_frequency == 0:
                if self.save_model_name.split(".")[-1] == "pmodel":
                    self.dump_as_proto(
                        save_dir=self.save_dir,
                        model_name=self.save_model_name,
                        state_dict=self.model.state_dict(),
                        epoch=epoch
                    )
                else:
                    ModelIO.save_torch_model(
                        state_dict=self.model.state_dict(),
                        save_dir=self.save_dir,
                        model_name=self.save_model_name,
                        epoch=epoch
                    )
                if self.save_onnx_model_name is not None and self.save_onnx_model_name != "":
                    ModelIO.save_torch_onnx(
                        model=self.model,
                        input_dim=(self.data_dim,),
                        save_dir=self.save_dir,
                        model_name=self.save_onnx_model_name,
                        epoch=epoch,
                    )
        # If patience never ran down (no early stop), the current model is best.
        if patient <= 0:
            self.best_model = copy.deepcopy(self.model)
        save_model_config(stage_model_config=self.export_conf, save_path=Path(self.save_dir))
        if self.save_model_name.split(".")[-1] == "pmodel":
            self.dump_as_proto(
                save_dir=self.save_dir,
                model_name=self.save_model_name,
                state_dict=self.best_model.state_dict(),
                final=True,
            )
        else:
            ModelIO.save_torch_model(
                state_dict=self.best_model.state_dict(),
                save_dir=self.save_dir,
                model_name=self.save_model_name,
            )
        if self.save_onnx_model_name:
            ModelIO.save_torch_onnx(
                model=self.best_model,
                input_dim=(self.data_dim,),
                save_dir=self.save_dir,
                model_name=self.save_onnx_model_name,
            )
        # if self.save_probabilities:
        self._save_prob(best_model=self.best_model, channel=broadcast_channel)
        self._save_feature_importance(broadcast_channel)
def _save_prob(self, best_model, channel):
if self.interaction_params.get("write_training_prediction"):
for batch_idx, (x_batch) in enumerate(self.train_dataloader):
x_batch = x_batch[0].to(self.device)
pred_trainer = best_model(x_batch)
channel.send(pred_trainer)
if BLOCKCHAIN:
logger.debug(f"Send pred, SHA256: {hashlib.sha256(pickle.dumps(pred_trainer)).hexdigest()}")
if self.interaction_params.get("write_validation_prediction"):
for batch_idx, (x_batch) in enumerate(self.val_dataloader):
x_batch = x_batch[0].to(self.device)
pred_trainer = best_model(x_batch)
channel.send(pred_trainer)
if BLOCKCHAIN:
logger.debug(f"Send pred, SHA256: {hashlib.sha256(pickle.dumps(pred_trainer)).hexdigest()}")
def _save_feature_importance(self, channel):
# weight = (FedNode.node_id, self.best_model.state_dict()["linear.weight"][0])
weight = (FedNode.node_name, self.best_model.state_dict()["linear.weight"][0], self.train_f_names)
channel.send(weight)
if BLOCKCHAIN:
logger.debug(f"Send weight, SHA256: {hashlib.sha256(pickle.dumps(weight)).hexdigest()}")
def check_data(self):
dim_channel = BroadcastChannel(name="check_data_com")
dim_channel.send(self.data_dim)
if BLOCKCHAIN:
logger.debug(f"Send dim, SHA256: {hashlib.sha256(pickle.dumps(self.data_dim)).hexdigest()}")
| 15,010 | 47.579288 | 144 | py |
XFL | XFL-master/python/algorithm/framework/local/__init__.py | 0 | 0 | 0 | py | |
XFL | XFL-master/python/algorithm/framework/local/feature_preprocess/label_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from pathlib import Path
import numpy as np
import pandas as pd
from sklearn.preprocessing import OneHotEncoder
import torch
from algorithm.core.data_io import CsvReader
from common.utils.config_parser import TrainConfigParser
from common.utils.logger import logger
from sklearn.impute import SimpleImputer
from service.fed_control import ProgressCalculator
from common.utils.utils import save_model_config
def data_impute(form, strategy, fill=None):
    """Build a sklearn SimpleImputer for the given missing-value marker.

    Args:
        form: value treated as missing (e.g. np.NaN or a sentinel).
        strategy: imputation strategy ("mean", "median", "most_frequent", "constant").
        fill: replacement value, only used when strategy is "constant".
    """
    kwargs = {"missing_values": form, "strategy": strategy, "copy": False}
    if strategy == 'constant':
        kwargs["fill_value"] = fill
    return SimpleImputer(**kwargs)
def config_combination(config_a, config_b):
    """Merge two config values (scalars or lists) into a deduplicated union.

    Args:
        config_a: a scalar value or a list of values.
        config_b: a scalar value or a list of values.

    Returns:
        The single element itself when the union has exactly one element, a
        list when it has several, and the empty set when both inputs are empty
        (the empty-set return is kept for backward compatibility with callers
        that only truth-test or isinstance-check the result).
    """
    # Normalise both sides to lists, collapsing the original four
    # scalar/list branch combinations into a single code path.
    list_a = config_a if isinstance(config_a, list) else [config_a]
    list_b = config_b if isinstance(config_b, list) else [config_b]
    config_combine = set(list_a + list_b)
    if len(config_combine) == 1:
        return list(config_combine)[0]
    if config_combine:
        return list(config_combine)
    return config_combine
class LocalFeaturePreprocessLabelTrainer(TrainConfigParser):
    def __init__(self, train_conf):
        """Set up the local preprocessing stage: load data, then parse the config.

        Args:
            train_conf: full training configuration for this stage.
        """
        super().__init__(train_conf)
        self.train = None
        self.val = None
        self.save_dir = None
        self.transform_switch = False
        # Per-feature imputation spec, keyed by column name.
        self.impute_dict = {}
        self.outlier_dict = {}
        self.onehot_dict = {}
        # Global (all-feature) imputation settings; refined in _parse_config.
        self.imputer_values_overall = []
        self.imputer_strategy_overall = "mean"  # default
        self.imputer_fillvalue_overall = None  # default
        # NOTE(review): impute_dict is initialised twice; the second assignment
        # below is redundant.
        self.impute_dict = {}
        self.onehot_feat_conf = {}
        self.feature_flag = False  # whether to impute by features
        # Fitted transformers to be persisted with torch.save.
        self.model_file = {}
        # Data must be loaded first: _parse_config reads self.columns.
        self._init_data()
        self._parse_config()
    def _parse_config(self) -> None:
        """
        parse algo config
        missing_values: int, float, str or list, e.g. [-999, 999] or ["none", "null", "na", ""], default=null
        strategy: str, default="mean"
        fill_value: str or numerical value if strategy == "constant", default=None
        Builds self.impute_dict (per-column imputation spec) by merging the
        overall missing/outlier settings with per-feature overrides, and reads
        the onehot and output sections of the config.
        """
        self.save_dir = self.output.get("path")
        self.save_model = self.output.get("model", {})
        if len(self.save_model) > 0:
            self.save_model_name = self.save_model.get("name")
            if not os.path.exists(self.save_dir):
                os.makedirs(self.save_dir)
            self.save_model_path = Path(self.save_dir, self.save_model_name)
            self.export_conf = [{
                "class_name": "LocalFeaturePreprocess",
                "filename": self.save_model_name
            }]
        # missing config
        self.missing_conf = self.train_params.get("missing", {})
        if len(self.missing_conf) > 0:
            self.missing_values_overall = self.missing_conf.get(
                "missing_values", [np.NaN, '', None, ' ', 'nan', 'none', 'null', 'na', 'None'])
            # transform null: None to default missing_values config
            if self.missing_values_overall is None:
                self.missing_values_overall = [np.NaN, '', None, ' ', 'nan', 'none', 'null', 'na', 'None']
            self.missing_strategy_overall = self.missing_conf.get("strategy", "mean")
            self.missing_fillvalue_overall = self.missing_conf.get("fill_value", None)
            self.missing_feat_conf = self.missing_conf.get("missing_features", {})
            self.imputer_values_overall = self.missing_values_overall
            self.imputer_strategy_overall = self.missing_strategy_overall
            self.imputer_fillvalue_overall = self.missing_fillvalue_overall
            logger.info("Missing values need to be imputed")
        # outlier config
        self.outlier_conf = self.train_params.get("outlier", {})
        if len(self.outlier_conf) > 0:
            self.outlier_values_overall = self.outlier_conf.get("outlier_values", [])
            self.outlier_feat_conf = self.outlier_conf.get("outlier_features", {})
            # Outliers are imputed the same way as missing values: merge both
            # value lists into one marker set.
            self.imputer_values_overall = config_combination(self.imputer_values_overall, self.outlier_values_overall)
            logger.info("Outlier values need to be imputed")
        # initialize impute_dict
        # Start every column from the overall settings; per-feature overrides
        # are applied below.
        if self.imputer_values_overall:
            self.impute_dict = dict(zip(self.columns, [{"missing_values": self.imputer_values_overall,
                                                        "strategy": self.imputer_strategy_overall,
                                                        "fill_value": self.imputer_fillvalue_overall}
                                                       for i in self.columns]))
        # if different features have different missing_values
        if len(self.missing_conf) > 0:
            if len(self.missing_feat_conf) > 0:
                for key in self.missing_feat_conf.keys():
                    if len(self.missing_feat_conf[key]) > 0:
                        missing_values_feat = self.missing_feat_conf[key].get("missing_values", None)
                        if missing_values_feat is not None:
                            self.impute_dict[key]["missing_values"] = missing_values_feat
                            self.feature_flag = True
                        missing_strategy_feat = self.missing_feat_conf[key].get("strategy", None)
                        if missing_strategy_feat is not None:
                            self.impute_dict[key]["strategy"] = missing_strategy_feat
                            self.feature_flag = True
                        missing_fillvalue_feat = self.missing_feat_conf[key].get("fill_value", None)
                        if missing_fillvalue_feat is not None:
                            self.impute_dict[key]["fill_value"] = missing_fillvalue_feat
                            self.feature_flag = True
        # if different features have different outlier_values
        if len(self.outlier_conf) > 0:
            if len(self.outlier_feat_conf) > 0:
                for key in self.outlier_feat_conf.keys():
                    if len(self.outlier_feat_conf[key]) > 0:
                        outlier_values_feat = self.outlier_feat_conf[key].get("outlier_values", None)
                        if outlier_values_feat is not None:
                            if key in self.impute_dict.keys():
                                self.impute_dict[key]["missing_values"] = config_combination(
                                    self.impute_dict[key]["missing_values"], outlier_values_feat)
                            else:
                                self.impute_dict[key] = {}
                                self.impute_dict[key]["missing_values"] = outlier_values_feat
                            self.feature_flag = True
        # check the three params
        # Columns added only via outlier_features lack strategy/fill_value;
        # backfill them from the overall settings.
        if len(self.impute_dict) > 0:
            for key in self.impute_dict.keys():
                if "strategy" not in self.impute_dict[key].keys():
                    self.impute_dict[key]["strategy"] = self.imputer_strategy_overall
                    self.impute_dict[key]["fill_value"] = self.imputer_fillvalue_overall
        # onehot config
        self.onehot_conf = self.train_params.get("onehot", {})
        if len(self.onehot_conf) > 0:
            self.onehot_feat_conf = self.onehot_conf.get("onehot_features", {})
        # output config
        self.save_trainset_name = self.output.get("trainset", {})
        self.save_valset_name = self.output.get("valset", {})
def __load_data(self, config) -> CsvReader:
if len(config) > 1:
logger.warning("More than one dataset is not supported.")
config = config[0]
if config["type"] == "csv":
data_reader = CsvReader(path=os.path.join(config["path"], config["name"]), has_id=config["has_id"],
has_label=config["has_label"])
else:
raise NotImplementedError("Dataset type {} is not supported.".format(config["type"]))
return data_reader
    def _init_data(self) -> None:
        """Load train/validation data, split off the label column and keep ids.

        Sets self.train / self.val (feature frames indexed by id),
        self.train_label / self.val_label (label frames, when a label exists),
        self.columns, self.train_ids and self.val_ids.

        Raises:
            NotImplementedError: when no trainset is configured.
        """
        if len(self.input["trainset"]) > 0:
            data: CsvReader = self.__load_data(self.input["trainset"])
            self.train = data.table.set_index(data.ids)
            self.label_name = data.label_name()
            if self.label_name is not None:
                # Keep the label aside; it is re-joined after preprocessing.
                self.train_label = self.train[[self.label_name]]
                self.train = self.train.drop(columns=self.label_name)
            self.columns = self.train.columns
            self.train_ids = data.ids
        else:
            raise NotImplementedError("Trainset was not configured.")
        if len(self.input["valset"]) > 0:
            data: CsvReader = self.__load_data(self.input["valset"])
            self.val = data.table.set_index(data.ids)
            # Valset is assumed to share the trainset's label column name.
            if self.label_name is not None:
                self.val_label = self.val[[self.label_name]]
                self.val = self.val.drop(columns=self.label_name)
            self.val_ids = data.ids
    def impute(self):
        """Fill missing/outlier values, either globally or per feature.

        When no per-feature override exists (``feature_flag`` is False) a single
        imputer is fitted on the whole train frame and applied to the val frame;
        otherwise one imputer per column is fitted via ``imputer_series``.
        Fitted imputers are stored under ``self.model_file["imputer"]``.
        """
        # fill missing_values for different features
        def imputer_series(data, col, flag):
            # Impute one column in place; "train" fits a new imputer,
            # "val" reuses the one fitted on the train data.
            if flag == "train":
                missing_value_new = self.impute_dict[col]["missing_values"]
                if isinstance(missing_value_new, list) and len(missing_value_new) > 0:
                    # Collapse multiple missing markers into NaN before fitting.
                    data[col] = data[[col]].replace(self.impute_dict[col]["missing_values"], np.NaN)
                    missing_value_new = np.NaN
                imputer = data_impute(missing_value_new, self.impute_dict[col]["strategy"],
                                      self.impute_dict[col]["fill_value"])
                imputer.fit(data[[col]])
                data[col] = imputer.transform(data[[col]])
                imputer_list.update({col: imputer})
            elif flag == "val":
                if isinstance(self.impute_dict[col]["missing_values"], list) and \
                        len(self.impute_dict[col]["missing_values"]) > 0:
                    data[[col]] = data[[col]].replace(self.impute_dict[col]["missing_values"], np.NaN)
                data[col] = imputer_list[col].transform(data[[col]])
        if not self.feature_flag and len(self.imputer_values_overall) > 0:
            # if all features are imputed as a whole
            imputer_values_overall = self.imputer_values_overall
            # deal with more than one missing_values: transform the missing_values to np.NaN
            if isinstance(self.imputer_values_overall, list):
                self.train = self.train.replace(self.imputer_values_overall, np.NaN)
                if self.val is not None:
                    self.val = self.val.replace(self.imputer_values_overall, np.NaN)
                imputer_values_overall = np.NaN
            # initialization
            # NOTE(review): local name "imupter" is a typo for "imputer".
            imupter = data_impute(imputer_values_overall, self.imputer_strategy_overall, self.imputer_fillvalue_overall)
            self.train = pd.DataFrame(imupter.fit_transform(self.train), columns=self.columns, index=self.train_ids)
            if self.val is not None:
                self.val = pd.DataFrame(imupter.transform(self.val), columns=self.columns, index=self.val_ids)
            self.model_file.update({"imputer": imupter})
            logger.info("Overall imputation done")
        elif self.feature_flag:
            # if different features have different missing_values
            imputer_list = {}
            pd.Series(self.impute_dict.keys()).apply(lambda x: imputer_series(self.train, x, "train"))
            if self.val is not None:
                pd.Series(self.impute_dict.keys()).apply(lambda x: imputer_series(self.val, x, "val"))
            self.model_file.update({"imputer": imputer_list})
            logger.info("Imputation for features done")
    def onehoter(self):
        """One-hot encode the configured categorical features.

        Encoders are fitted on the train frame and re-applied to the val frame;
        each encoded column is replaced by columns named ``{col}_{i}``. Fitted
        encoders are stored under ``self.model_file["onehot"]``.
        """
        def onehot_series(col, flag):
            # Encode one column in place; "train" fits, "val" reuses the encoder.
            if flag == "train":
                onehot = OneHotEncoder(handle_unknown='ignore')
                onehot.fit(self.train[[col]])
                new_data = pd.DataFrame(onehot.transform(self.train[[col]]).toarray())
                onehot_list[col] = onehot
                col_len = len(onehot.categories_[0])
                col_name = ["{}_{}".format(col, i) for i in range(col_len)]
                new_data.columns = col_name
                new_data.index = self.train.index
                # Replace the raw column with its expanded one-hot columns.
                self.train = self.train.join(new_data).drop(columns=col)
            elif flag == "val":
                new_data = pd.DataFrame(onehot_list[col].transform(self.val[[col]]).toarray())
                col_name = ["{}_{}".format(col, i) for i in range(len(onehot_list[col].categories_[0]))]
                new_data.columns = col_name
                new_data.index = self.val.index
                self.val = self.val.join(new_data).drop(columns=col)
        if len(self.onehot_feat_conf) > 0:
            onehot_list = {}
            pd.Series(self.onehot_feat_conf.keys()).apply(lambda x: onehot_series(x, "train"))
            if self.val is not None:
                pd.Series(self.onehot_feat_conf.keys()).apply(lambda x: onehot_series(x, "val"))
            self.model_file.update({"onehot": onehot_list})
            logger.info("Onehot for features done")
    def fit(self) -> None:
        """
        missing_values and outlier_values are combined to transform the data

        Pipeline: impute (missing + outlier markers) -> one-hot encode ->
        re-attach the label column -> persist the fitted transformers and the
        transformed train/val csv files.
        """
        if len(self.missing_conf) == 0 and len(self.outlier_conf) == 0:
            logger.info("No missing values and outlier values need to be imputed")
        else:
            logger.info("Missing values or outlier values will be imputed")
            self.impute()
            logger.info("Imputation done")
        if len(self.onehot_conf) == 0:
            logger.info("No onehot process")
        else:
            logger.info("Onehot will starts")
            self.onehoter()
            logger.info("Onehot done")
        # recover label column
        if self.label_name is not None:
            self.train = self.train_label.join(self.train)
            if self.val is not None:
                self.val = self.val_label.join(self.val)
        # save model file (optional)
        if len(self.save_model) > 0:
            save_model_config(stage_model_config=self.export_conf,
                              save_path=self.save_dir)
            torch.save(self.model_file, self.save_model_path)
            logger.info("Model file saved")
        # save transformed data
        if len(self.save_trainset_name) > 0:
            save_train_path = self.save_dir / Path(self.save_trainset_name["name"])
            if not os.path.exists(os.path.dirname(save_train_path)):
                os.makedirs(os.path.dirname(save_train_path))
            self.train.to_csv(save_train_path, index=self.input["trainset"][0]["has_id"])
            logger.info("Preprocessed trainset done")
            if self.val is not None:
                save_val_path = self.save_dir / Path(self.save_valset_name["name"])
                if not os.path.exists(os.path.dirname(save_val_path)):
                    os.makedirs(os.path.dirname(save_val_path))
                # NOTE(review): index flag reuses the trainset's has_id config
                # for the valset as well — confirm intentional.
                self.val.to_csv(save_val_path, index=self.input["trainset"][0]["has_id"])
                logger.info("Preprocessed valset done")
        ProgressCalculator.finish_progress()
| 16,082 | 48.638889 | 120 | py |
XFL | XFL-master/python/algorithm/framework/local/feature_preprocess/__init__.py | 0 | 0 | 0 | py | |
XFL | XFL-master/python/algorithm/framework/local/feature_preprocess/trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .label_trainer import LocalFeaturePreprocessLabelTrainer
# The non-label trainer performs identical local preprocessing, so it simply
# reuses the label-trainer implementation under the expected class name.
LocalFeaturePreprocessTrainer = LocalFeaturePreprocessLabelTrainer
| 737 | 35.9 | 74 | py |
XFL | XFL-master/python/algorithm/framework/local/normalization/label_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from pathlib import Path
import numpy as np
import pandas as pd
from scipy.linalg import norm
from google.protobuf import json_format
from service.fed_control import ProgressCalculator
from common.utils.config_parser import TrainConfigParser
from common.utils.logger import logger
from common.utils.utils import save_model_config
from common.model.python.feature_model_pb2 import NormalizationModel
from common.utils.model_io import ModelIO
class LocalNormalizationLabelTrainer(TrainConfigParser):
def __init__(self, train_conf):
"""
Args:
train_conf:
"""
super().__init__(train_conf)
self.train_data = None
self.valid_data = None
self.save_dir = None
self.skip_cols = []
self.transform_switch = False
self._parse_config()
self._init_data()
self.export_conf = [{
"class_name": "LocalNormalization",
"filename": self.save_pmodel_name,
"input_schema": ','.join([_ for _ in self.train_data.columns if _ not in set(["y", "id"])]),
}]
def _parse_config(self) -> None:
"""
parse algo config
Returns:
"""
self.save_dir = self.output.get("path")
self.save_model_name = self.output.get("model", {}).get("name")
self.save_pmodel_name = self.output.get("proto_model", {}).get("name", "")
self.save_trainset_name = self.output.get("trainset", {}).get("name")
self.save_valset_name = self.output.get("valset", {}).get("name")
    def _init_data(self) -> None:
        """
        load raw data
        1. using train set to generate the normalizer
        2. apply the normalizer to the valid set for subsequent model training
        Returns:

        Also records which columns must be skipped during normalization:
        id/label columns (when configured) and non-numeric (object) columns.
        """
        if self.input_trainset:
            df_list = []
            for ts in self.input_trainset:
                if ts.get("type") == "csv":
                    df_list.append(pd.read_csv(
                        os.path.join(ts.get("path"), ts.get("name"))))
                    if ts.get("has_id") and 'id' not in self.skip_cols:
                        self.skip_cols.append('id')
                    if ts.get("has_label") and 'y' not in self.skip_cols:
                        self.skip_cols.append('y')
                else:
                    raise NotImplementedError(
                        "Load function {} does not Implemented.".format(
                            ts.get("type"))
                    )
            self.train_data = pd.concat(df_list)
            # Object-dtype columns cannot be normalized numerically; skip them.
            self.skip_cols.extend(
                self.train_data.columns[self.train_data.dtypes == 'object'])
            if len(self.skip_cols) > 0:
                logger.info("Skip columns {}".format(','.join(self.skip_cols)))
        if self.input_valset:
            df_list = []
            for vs in self.input_valset:
                df_list.append(pd.read_csv(
                    os.path.join(vs.get("path"), vs.get("name"))))
            self.transform_switch = True
            self.valid_data = pd.concat(df_list)
def fit(self) -> None:
"""
train a normalizer
train_params:
- axis = {1 if normalization is done by samples, 0 if normalization is done by feature}
- norm = {"l1", "l2", "max"}
output:
- the .csv files which save the transformed data
- the .pt file which saves the normalizer
:return: None
"""
if self.train_data is None:
logger.info("no data, skip stage.".format(self.identity))
return
normalizer_dict = {}
cols = [_ for _ in self.train_data.columns if _ not in self.skip_cols]
if self.train_params.get("axis") == 1:
valid_normalizer = None
# independently normalize each sample
if self.train_params.get("norm") == "l1":
train_normalizer = self.train_data[cols].apply(
lambda x: norm(x, ord=1), axis=1)
if self.transform_switch:
valid_normalizer = self.valid_data[cols].apply(
lambda x: norm(x, ord=1), axis=1)
elif self.train_params.get("norm") == "l2":
train_normalizer = self.train_data[cols].apply(
lambda x: norm(x, ord=2), axis=1)
if self.transform_switch:
valid_normalizer = self.valid_data[cols].apply(
lambda x: norm(x, ord=2), axis=1)
elif self.train_params.get("norm") == "max":
train_normalizer = self.train_data[cols].apply(
lambda x: np.max(np.abs(x)), axis=1)
if self.transform_switch:
valid_normalizer = self.valid_data[cols].apply(
lambda x: np.max(np.abs(x)), axis=1)
else:
raise NotImplementedError("norm {} is invalid.".format(
self.train_params.get("norm", '')))
train_normalizer = np.where(
np.abs(train_normalizer - 0) < 1e-6, 1e-6, train_normalizer)
if self.transform_switch:
valid_normalizer = np.where(
np.abs(valid_normalizer - 0) < 1e-6, 1e-6, valid_normalizer)
for f in cols:
self.train_data[f] /= train_normalizer
if self.transform_switch:
self.valid_data[f] /= valid_normalizer
normalizer_dict["axis"] = 1
normalizer_dict["norm"] = self.train_params["norm"]
elif self.train_params.get("axis") == 0:
# normalize each feature
default_norm = self.train_params.get("norm")
norm_dict = {}
normalizers = {}
if default_norm is None:
pass
elif default_norm not in ("l1", "l2", "max"):
raise NotImplementedError("norm {} is invalid.".format(
self.train_params.get("norm", '')))
else:
for f in cols:
norm_dict[f] = default_norm
for f in self.train_params.get("feature_norm", []):
if self.train_params["feature_norm"][f].get("norm", default_norm) not in (
"l1", "l2", "max"):
raise NotImplementedError("norm {} is invalid.".format(
self.train_params.get("norm", '')))
elif f not in cols:
raise KeyError(
"Feature {} cannot be found in df.".format(f))
else:
norm_dict[f] = self.train_params["feature_norm"][f]["norm"]
for idx, (f, n) in enumerate(norm_dict.items()):
logger.info("{}: Count={}, Min={}, Max={}, Unique={}.".format(
f, self.train_data[f].count(), self.train_data[f].min(),
self.train_data[f].max(), self.train_data[f].nunique()
))
if n == "l1":
normalizer = norm(self.train_data[f].values, ord=1)
elif n == "l2":
normalizer = norm(self.train_data[f].values, ord=2)
elif n == "max":
normalizer = np.max(np.abs(self.train_data[f].values))
else:
normalizer = 1
if np.abs(normalizer - 0) < 1e-6:
normalizer = 1
self.train_data[f] /= normalizer
if self.transform_switch:
self.valid_data[f] /= normalizer
logger.info("{}: Norm={}.".format(f, normalizer))
normalizers[idx] = {"feature": f, "norm_value": float(normalizer)}
normalizer_dict["axis"] = 0
normalizer_dict["normalizer"] = normalizers
elif "axis" in self.train_params:
raise ValueError("axis {} is invalid.".format(
self.train_params["axis"]))
else:
raise KeyError(
"cannot find the param axis, which is required for normalization.")
self.save(normalizer_dict)
ProgressCalculator.finish_progress()
def save(self, normalizer):
if self.save_dir:
self.save_dir = Path(self.save_dir)
else:
return
print(normalizer, "----")
if self.save_pmodel_name:
save_model_config(stage_model_config=self.export_conf,
save_path=self.save_dir)
dump_path = self.save_dir / Path(self.save_pmodel_name)
norm_model = NormalizationModel()
json_format.ParseDict(normalizer, norm_model)
with open(dump_path, 'wb') as f:
f.write(norm_model.SerializeToString())
logger.info(
"Normalize results saved as {}.".format(dump_path)
)
if self.save_model_name:
ModelIO.save_json_model(
model_dict=normalizer,
save_dir=self.save_dir,
model_name=self.save_model_name,
)
logger.info(
"Normalize results saved as {}.".format(Path(self.save_dir) / self.save_model_name)
)
save_trainset_path = self.save_dir / Path(self.save_trainset_name)
if not os.path.exists(os.path.dirname(save_trainset_path)):
os.makedirs(os.path.dirname(save_trainset_path))
self.train_data.to_csv(
save_trainset_path, float_format='%.6g', index=False)
logger.info("Data saved as {}, length: {}.".format(
save_trainset_path, len(self.train_data)))
if self.transform_switch:
save_valset_path = self.save_dir / Path(self.save_valset_name)
if not os.path.exists(os.path.dirname(save_valset_path)):
os.makedirs(os.path.dirname(save_valset_path))
self.valid_data.to_csv(
save_valset_path, float_format='%.6g', index=False)
logger.info("Data saved as {}, length: {}.".format(
save_valset_path, len(self.valid_data)))
logger.info("Data normalize completed.")
| 10,839 | 40.532567 | 104 | py |
XFL | XFL-master/python/algorithm/framework/local/normalization/__init__.py | 0 | 0 | 0 | py | |
XFL | XFL-master/python/algorithm/framework/local/normalization/trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .label_trainer import LocalNormalizationLabelTrainer
LocalNormalizationTrainer = LocalNormalizationLabelTrainer
| 725 | 35.3 | 74 | py |
XFL | XFL-master/python/algorithm/framework/local/standard_scaler/label_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from pathlib import Path
import numpy as np
import pandas as pd
from google.protobuf import json_format
from common.utils.config_parser import TrainConfigParser
from common.utils.logger import logger
from common.utils.utils import save_model_config
from common.model.python.feature_model_pb2 import StandardizationModel
from service.fed_control import ProgressCalculator
from common.utils.model_io import ModelIO
class LocalStandardScalerLabelTrainer(TrainConfigParser):
    """
    local standard scaler
    removing the mean and scaling to unit variance
    z = (x - u) / s
    where u is the mean of the training samples and s is the standard deviation of the training samples.
    """
    def __init__(self, train_conf):
        """
        Args:
            train_conf: train-stage configuration dict, parsed by TrainConfigParser.
        """
        super().__init__(train_conf)
        self.train_data = None
        self.valid_data = None
        self.save_dir = None
        # columns excluded from scaling (id / label / object dtype)
        self.skip_cols = []
        # becomes True only when a valid set is configured
        self.transform_switch = False
        self._parse_config()
        self._init_data()
        # NOTE(review): self.train_data stays None when no trainset is
        # configured, in which case .columns below raises AttributeError.
        self.export_conf = [{
            "class_name": "LocalStandardScaler",
            "filename": self.save_pmodel_name,
            "input_schema": ','.join([_ for _ in self.train_data.columns if _ not in set(["y", "id"])]),
        }]
    def _parse_config(self) -> None:
        # extract output locations (model / proto model / datasets) from config
        self.save_dir = self.output.get("path")
        self.save_model_name = self.output.get("model", {}).get("name")
        self.save_pmodel_name = self.output.get(
            "proto_model", {}).get("name", "")
        self.save_trainset_name = self.output.get("trainset", {}).get("name")
        self.save_valset_name = self.output.get("valset", {}).get("name")
    def _init_data(self) -> None:
        """
        load raw data
        1. using train set to generate the standard scaler
        2. apply it to the valid set for subsequent model training

        Raises:
            NotImplementedError: for any trainset type other than "csv".
        :return:
        """
        if self.input_trainset:
            df_list = []
            for ts in self.input_trainset:
                if ts.get("type") == "csv":
                    df_list.append(pd.read_csv(
                        os.path.join(ts.get("path"), ts.get("name"))))
                    # id / label columns are never scaled
                    if ts.get("has_id") and 'id' not in self.skip_cols:
                        self.skip_cols.append('id')
                    if ts.get("has_label") and 'y' not in self.skip_cols:
                        self.skip_cols.append('y')
                else:
                    raise NotImplementedError(
                        "Load function {} does not Implemented.".format(
                            ts.get("type"))
                    )
            self.train_data = pd.concat(df_list)
            # non-numeric (object dtype) columns are skipped as well
            self.skip_cols.extend(
                self.train_data.columns[self.train_data.dtypes == 'object'])
        if len(self.skip_cols) > 0:
            logger.info("Skip columns {}".format(','.join(self.skip_cols)))
        if self.input_valset:
            df_list = []
            for vs in self.input_valset:
                if vs.get("type") == "csv":
                    df_list.append(pd.read_csv(
                        os.path.join(vs.get("path"), vs.get("name"))))
            self.transform_switch = True
            self.valid_data = pd.concat(df_list)
    def fit(self) -> None:
        """
        train a standard scaler
        params:
            - with_mean -> Boolean, u = 0 if with_mean=False
            - with_std -> Boolean, s = 1 if with_std=False
        output:
            - the .csv files which save the transformed data
            - the .pt file which saves the normalizer

        Raises:
            KeyError: when a feature_standard key is not a column of the data.
        :return: None
        """
        if self.train_data is None:
            # NOTE(review): the format() call has no placeholder, so
            # self.identity is never actually printed here.
            logger.info("no data, skip stage.".format(self.identity))
            return
        scaler_dict = {}
        cols = [_ for _ in self.train_data.columns if _ not in self.skip_cols]
        standardize_dict = {}
        standard_scaler = {}
        default_with_mean = self.train_params.get("with_mean")
        default_with_std = self.train_params.get("with_std")
        # global defaults apply to every column only when both flags are set
        if default_with_mean is None:
            logger.warning(
                "cannot find the param with_mean, skip global standardization.")
        elif default_with_std is None:
            logger.warning(
                "cannot find the param with_std, skip global standardization.")
        else:
            for f in cols:
                standardize_dict[f] = {
                    "with_mean": default_with_mean,
                    "with_std": default_with_std
                }
        # per-feature overrides take precedence over the global defaults
        for f in self.train_params.get("feature_standard", []):
            if self.train_params["feature_standard"][f].get("with_mean") is None:
                logger.warning(
                    "cannot find the param with_mean for feature {}, skip standardization.".format(f))
            elif self.train_params["feature_standard"][f].get("with_std") is None:
                logger.warning(
                    "cannot find the param with_std for feature {}, skip standardization.".format(f))
            elif f not in cols:
                raise KeyError("Feature {} cannot be found in df.".format(f))
            else:
                standardize_dict[f] = standardize_dict.get(f, {})
                standardize_dict[f]["with_mean"] = self.train_params["feature_standard"][f]["with_mean"]
                standardize_dict[f]["with_std"] = self.train_params["feature_standard"][f]["with_std"]
        for idx, (f, d) in enumerate(standardize_dict.items()):
            logger.info("{}: Count={}, Min={}, Max={}, Unique={}.".format(
                f, self.train_data[f].count(), self.train_data[f].min(),
                self.train_data[f].max(), self.train_data[f].nunique()
            ))
            if d["with_mean"]:
                u = float(self.train_data[f].mean())
            else:
                u = 0
            if d["with_std"]:
                s = float(self.train_data[f].std())
            else:
                s = 1
            # a (near-)zero std would divide by zero; use identity instead
            if np.abs(s - 0) < 1e-6:
                s = 1
            self.train_data[f] = (self.train_data[f] - u) / s
            if self.transform_switch:
                # apply the train-set statistics to the valid set
                self.valid_data[f] = (self.valid_data[f] - u) / s
            logger.info("{}: u={}, s={}.".format(f, u, s))
            standard_scaler[idx] = {"feature": f, "u": u, "s": s}
        scaler_dict["standard_scaler"] = standard_scaler
        self.save(scaler_dict)
        ProgressCalculator.finish_progress()
    def save(self, scaler_dict):
        """Persist the fitted scaler and the transformed datasets under save_dir.

        Args:
            scaler_dict: dict mapping column index -> {"feature", "u", "s"}.
        """
        if self.save_dir:
            self.save_dir = Path(self.save_dir)
        else:
            # no output path configured -> nothing to save
            return
        if self.save_pmodel_name:
            save_model_config(stage_model_config=self.export_conf,
                              save_path=self.save_dir)
            dump_path = self.save_dir / Path(self.save_pmodel_name)
            standard_model = StandardizationModel()
            json_format.ParseDict(scaler_dict, standard_model)
            with open(dump_path, 'wb') as f:
                f.write(standard_model.SerializeToString())
            logger.info("Standardize results saved as {}.".format(dump_path))
        if self.save_model_name:
            ModelIO.save_json_model(
                model_dict=scaler_dict,
                save_dir=self.save_dir,
                model_name=self.save_model_name)
            logger.info(
                "Standardize results saved as {}.".format(Path(self.save_dir) / self.save_model_name))
        save_trainset_path = self.save_dir / Path(self.save_trainset_name)
        if not os.path.exists(os.path.dirname(save_trainset_path)):
            os.makedirs(os.path.dirname(save_trainset_path))
        self.train_data.to_csv(
            save_trainset_path, float_format='%.6g', index=False)
        logger.info("Data saved as {}, length: {}.".format(
            save_trainset_path, len(self.train_data)))
        if self.transform_switch:
            save_valset_path = self.save_dir / Path(self.save_valset_name)
            if not os.path.exists(os.path.dirname(save_valset_path)):
                os.makedirs(os.path.dirname(save_valset_path))
            self.valid_data.to_csv(
                save_valset_path, float_format='%.6g', index=False)
            logger.info("Data saved as {}, length: {}.".format(
                save_valset_path, len(self.valid_data)))
        logger.info("Data standardize completed.")
XFL | XFL-master/python/algorithm/framework/local/standard_scaler/__init__.py | 0 | 0 | 0 | py | |
XFL | XFL-master/python/algorithm/framework/local/standard_scaler/trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .label_trainer import LocalStandardScalerLabelTrainer
LocalStandardScalerTrainer = LocalStandardScalerLabelTrainer
| 728 | 35.45 | 74 | py |
XFL | XFL-master/python/algorithm/framework/local/data_split/label_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import multiprocessing
from pathlib import Path
import numpy as np
from common.utils.config_parser import TrainConfigParser
from common.utils.logger import logger
from service.fed_control import ProgressCalculator
import glob
import time
# import multiprocessing
# multiprocessing.set_start_method('fork')
def parallel_apply_generator(func, iterable, workers, max_queue_size, dummy=False, random_seeds=True):
    """Apply ``func`` to each item of ``iterable`` in a worker pool.

    Yields ``(index, result)`` tuples in completion order (not input order).

    Args:
        func: single-argument callable run inside the workers.
        iterable: items to process.
        workers: number of worker processes (threads when ``dummy=True``).
        max_queue_size: capacity of the input queue.
        dummy: if True, use threads via ``multiprocessing.dummy``.
        random_seeds: True -> one ``None`` seed per worker; otherwise a list
            of seeds, one consumed per worker via ``np.random.seed``.
    """
    if dummy:
        from multiprocessing.dummy import Pool, Queue, Manager
    else:
        from multiprocessing import Pool, Queue, Manager
    in_queue, out_queue, seed_queue = Queue(max_queue_size), Manager().Queue(), Manager().Queue()
    if random_seeds is True:
        random_seeds = [None] * workers
    for seed in random_seeds:
        seed_queue.put(seed)
    def worker_step(in_queue, out_queue):
        # Runs forever inside each worker (used as the pool initializer);
        # workers are torn down by pool.terminate() at the end.
        if not seed_queue.empty():
            np.random.seed(seed_queue.get())
        while True:
            ii, dd = in_queue.get()
            r = func(dd)
            out_queue.put((ii, r))
    pool = Pool(workers, worker_step, (in_queue, out_queue))
    in_count, out_count = 0, 0
    for i, d in enumerate(iterable):
        in_count += 1
        # NOTE(review): put(block=False) raises queue.Full when the input
        # queue is at capacity; the while/break wrapper does not retry.
        while True:
            in_queue.put((i, d), block=False)
            break
        # drain any results that are already available
        if out_queue.qsize() > 0:
            yield out_queue.get()
            out_count += 1
    # wait for the remaining results
    while out_count != in_count:
        yield out_queue.get()
        out_count += 1
    pool.terminate()
def parallel_apply(
    func,
    iterable,
    workers,
    max_queue_size,
    callback=None,
    dummy=False,
    random_seeds=True,
    unordered=True
):
    """Apply ``func`` to each item of ``iterable`` in a worker pool.

    Args:
        func: single-argument callable run inside the workers.
        iterable: items to process.
        workers: number of worker processes (threads when ``dummy=True``).
        max_queue_size: capacity of the input queue.
        callback: if given, called with each result as it completes and
            nothing is returned.
        dummy: if True, use threads via ``multiprocessing.dummy``.
        random_seeds: forwarded to ``parallel_apply_generator``.
        unordered: when collecting results (``callback is None``), return
            them in completion order if True, in input order if False.

    Returns:
        A list of results when ``callback`` is None, otherwise None.
    """
    generator = parallel_apply_generator(func, iterable, workers, max_queue_size, dummy, random_seeds)
    if callback is None:
        if unordered:
            return [d for i, d in generator]
        # Fix: the ordered branch previously fell through and returned None;
        # sort the (index, result) pairs by input index before unpacking.
        results = sorted(generator, key=lambda d: d[0])
        return [d for i, d in results]
    else:
        for i, d in generator:
            callback(d)
class LocalDataSplitLabelTrainer(TrainConfigParser):
    """Splits one or more CSV files into a train set and a valid set,
    optionally shuffling the rows (local per-chunk shuffle in parallel,
    then a global round-robin merge)."""
    def __init__(self, train_conf):
        """
        support data split from more than one file:
        if there is no input dataset name, all csv files under the input dataset path will be combined.
        Args:
            train_conf:
                shuffle: bool, whether need to shuffle;
                max_num_cores: int, parallel worker num;
                batch_size: int, the size of small file;
        """
        super().__init__(train_conf)
        # input config
        self.line_num = 0
        self.input_data = self.input.get("dataset", [])
        if self.input_data:
            # only the first dataset entry drives path/name/header handling
            self.input_data_path = self.input_data[0].get("path", None)
            self.input_data_name = self.input_data[0].get("name", None)
            self.header = self.input_data[0].get("has_header", False)
        if self.input_data:
            if self.input_data_name is None:
                # more than one file
                self.files = glob.glob("{}/*.csv".format(self.input_data_path))
            else:
                # one file
                self.files = glob.glob("{}/{}".format(self.input_data_path, self.input_data_name))
        else:
            raise NotImplementedError("Dataset was not configured.")
        # output config
        self.output_path = self.output.get("path")
        self.save_trainset = self.output.get("trainset", {})
        # default output names are derived from the first input file name
        save_trainset_name = self.save_trainset.get("name", "{}_train.csv".format(
            self.files[0].split("/")[-1].replace(".csv", '')))
        if not os.path.exists(Path(self.output_path)):
            Path(self.output_path).mkdir(parents=True, exist_ok=True)
        self.save_trainset_name = Path(self.output_path, save_trainset_name)
        if not os.path.exists(os.path.dirname(self.save_trainset_name)):
            os.makedirs(os.path.dirname(self.save_trainset_name))
        self.save_valset = self.output.get("valset", {})
        save_valset_name = self.save_valset.get("name", "{}_val.csv".format(self.files[0].split("/")[-1].
                                                                            replace(".csv", '')))
        self.save_valset_name = Path(self.output_path, save_valset_name)
        if not os.path.exists(os.path.dirname(self.save_valset_name)):
            os.makedirs(os.path.dirname(self.save_valset_name))
        # train info
        self.shuffle = self.train_params.get("shuffle", False)
        if self.shuffle:
            logger.info("Shuffle is needed")
        else:
            logger.info("Shuffle is not needed")
        self.worker_num = self.train_params.get("max_num_cores", 4)
        # clamp worker count to [1, cpu_count]
        self.worker_num = min(max(1, self.worker_num), multiprocessing.cpu_count())
        if self.shuffle:
            self.batch_size = self.train_params.get("batch_size", 100000)
        # train/val split ratio is expressed as two integer weights
        self.train_weight = self.train_params.get("train_weight", 8)
        self.val_weight = self.train_params.get("val_weight", 2)
        self.train_ratio = self.train_weight / (self.train_weight + self.val_weight)
        self.header_data = None
    def local_shuffle(self, batch_k):
        """Shuffle one batch of lines in place and write it to a numbered temp chunk file.

        Args:
            batch_k: tuple of (list of text lines, chunk index).
        """
        batch, k = batch_k
        np.random.shuffle(batch)
        with open("%s_local_shuffle/%05d.csv" % (self.input_data_path, k), "w") as f:
            for text in batch:
                f.write(text)
    def generator(self):
        """Yield (batch_of_lines, chunk_index) tuples of size batch_size from all input files.

        Skips the header line of each file (remembering it in self.header_data)
        and counts total data lines into self.line_num as a side effect.
        """
        batch, k = [], 0
        for j in self.files:
            header = self.header
            with open(j) as f:
                for line in f:
                    if header:
                        self.header_data = line
                        header = False
                        continue
                    # normalize line endings so every record ends with "\n"
                    batch.append(line.rstrip("\n")+"\n")
                    self.line_num += 1
                    if len(batch) == self.batch_size:
                        yield batch, k
                        batch = []
                        k += 1
        if batch:
            yield batch, k
    def fit(self):
        """Perform the split (with optional shuffle) and write the train/val CSV files.

        NOTE(review): the output handles opened via ``F = open(...)`` and the
        temp-chunk handles in ``opens`` are never explicitly closed; they are
        released only when garbage-collected.
        """
        start_time = time.time()
        if self.shuffle:
            # mkdir of local_shuffle
            temp_path = Path("%s_local_shuffle" % self.input_data_path)
            if not os.path.exists(temp_path):
                temp_path.mkdir(parents=True, exist_ok=True)
            # local shuffle
            parallel_apply(func=self.local_shuffle, iterable=self.generator(), workers=self.worker_num,
                           max_queue_size=10)
            # train and val line num
            trainset_num = int(self.line_num * self.train_ratio)
            files = glob.glob("{}/*.csv".format(temp_path))
            opens = [open(j) for j in files]
            # global shuffle
            n, k = 0, 0
            F = open(self.save_trainset_name, "w")
            if self.header:
                F.write(self.header_data)
            # read the shuffled chunks round-robin in a fresh random order
            # each pass; switch the output file to the valset once the
            # trainset quota is reached
            for i in range(self.batch_size):
                orders = np.random.permutation(len(opens))
                for j in orders:
                    text = opens[j].readline()
                    if text:
                        n += 1
                        F.write(text)
                        if n == trainset_num:
                            n = 0
                            k += 1
                            F = open(self.save_valset_name, "w")
                            if self.header:
                                F.write(self.header_data)
            shutil.rmtree(temp_path, ignore_errors=True) # del temp path
        else:
            # count line num
            for j in self.files:
                header = self.header
                with open(j) as f:
                    for line in f:
                        if header:
                            self.header_data = line
                            header = False
                            continue
                        self.line_num += 1
            # train and val line num
            trainset_num = int(self.line_num * self.train_ratio)
            # read and write directly
            F = open(self.save_trainset_name, "w")
            if self.header:
                F.write(self.header_data)
            n = 0
            for i in self.files:
                header = self.header
                with open(i) as f:
                    for text in f:
                        if header:
                            header = False
                            continue
                        n += 1
                        F.write(text)
                        if n == trainset_num:
                            n = 0
                            F = open(self.save_valset_name, "w")
                            if self.header:
                                F.write(self.header_data)
        ProgressCalculator.finish_progress()
        end_time = time.time()
        logger.info("Time cost: %ss" % (end_time - start_time))
| 9,496 | 37.294355 | 105 | py |
XFL | XFL-master/python/algorithm/framework/local/data_split/__init__.py | 0 | 0 | 0 | py | |
XFL | XFL-master/python/algorithm/framework/local/data_split/trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .label_trainer import LocalDataSplitLabelTrainer
LocalDataSplitTrainer = LocalDataSplitLabelTrainer
| 713 | 34.7 | 74 | py |
XFL | XFL-master/python/algorithm/framework/local/data_statistic/label_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from pathlib import Path
import numpy as np
import pandas as pd
from service.fed_control import ProgressCalculator
from algorithm.core.data_io import CsvReader
from common.utils.config_parser import TrainConfigParser
from common.utils.logger import logger
def float_transform(data):
    """Round values to six decimal places.

    For a Series, returns a Series of the rounded values. For a DataFrame,
    returns a one-column DataFrame built from its first column only.
    Any other input type yields None.
    """
    def _round6(value):
        return float("%.6f" % value)

    if isinstance(data, pd.Series):
        return data.apply(_round6)
    if isinstance(data, pd.DataFrame):
        first_col = data.columns[0]
        return pd.DataFrame(data[first_col].apply(_round6))
    return None
class LocalDataStatisticLabelTrainer:
    """Computes per-feature summary statistics (mean, median, missing ratio,
    min/max, variance, std, quantiles, skewness, kurtosis) over one or more
    CSV datasets and optionally dumps them as a JSON summary."""
    def __init__(self, train_conf):
        """
        support data statistic:
        If more than one file, their header should be the same.
        If there are missing values, they will be dropped before data statistic
        quantile: list of float, if not set, quartile will be calculated
        """
        # input config
        self.config = TrainConfigParser(train_conf)
        self.input_data = self.config.input.get("dataset", [])
        # values treated as "missing" during the missing-ratio scan
        self.missing_value = [np.NaN, "", None, " ", "nan", "none", "null", "na", "None"]
        if self.input_data:
            if len(self.input_data) == 1:
                self.input_data_path = self.input_data[0].get("path")
                self.input_data_name = self.input_data[0].get("name")
                self.input_data_id = self.input_data[0].get("has_id", False)
                self.input_data_label = self.input_data[0].get("has_label", False)
                data_reader = CsvReader(path=os.path.join(self.input_data_path, self.input_data_name),
                                        has_id=self.input_data_id, has_label=self.input_data_label)
                self.data = data_reader.table.set_index(data_reader.ids)
            else:
                # concatenate all configured files (headers assumed identical)
                self.data = pd.DataFrame()
                for dataset_conf in self.input_data:
                    input_data_path = dataset_conf.get("path")
                    input_data_name = dataset_conf.get("name")
                    input_data_id = dataset_conf.get("has_id", False)
                    self.input_data_label = dataset_conf.get("has_label", False)
                    data_reader = CsvReader(path=os.path.join(input_data_path, input_data_name),
                                            has_id=input_data_id, has_label=self.input_data_label)
                    data = data_reader.table.set_index(data_reader.ids)
                    self.data = pd.concat([self.data, data])
        # drop label
        if self.input_data_label:
            # the label is assumed to be the first column after the index
            self.y = pd.DataFrame(self.data.iloc[:, 0])
            self.data = self.data.iloc[:, 1:]
        # output config
        self.output_flag = self.config.output.get("summary", None)
        if self.output_flag is not None:
            self.output_path = self.config.output["path"]
            self.output_name = self.config.output["summary"]["name"]
            self.output_path_name = Path(self.output_path, self.output_name)
            if not os.path.exists(Path(self.output_path)):
                Path(self.output_path).mkdir(parents=True, exist_ok=True)
        # init summary dict
        self.summary_dict = {}
        self.indicators = ["mean", "median", "missing_ratio", "min", "max", "variance", "std", "quantile",
                           "skewness", "kurtosis"]
        for i in self.indicators:
            self.summary_dict[i] = {}
        # missing value flag
        self.missing_flag = dict(zip(self.data.columns, [False] * len(self.data.columns)))
        # quantile config
        self.quantile = self.config.train_params.get("quantile", [0.25, 0.5, 0.75])
    def data_overview(self):
        """Record row/column counts and feature names into the summary dict."""
        data_shape = np.shape(self.data)
        self.summary_dict.update({"row_num": data_shape[0]})
        self.summary_dict.update({"column_num": data_shape[1]})
        self.summary_dict.update({"feature_names": list(self.data.columns)})
        logger.info("The shape of input data is {}*{}".format(data_shape[0], data_shape[1]))
    def missing_overview(self):
        """Compute per-feature missing ratios and normalize missing cells to np.NaN."""
        def missing_count(feat):
            tmp = np.sum(self.data[feat].isin(self.missing_value))
            if tmp > 0:
                self.missing_flag[feat] = True
            self.summary_dict["missing_ratio"][feat] = float("%.6f" % (tmp / self.summary_dict["row_num"]))
            # replace all missing values to np.NaN
            self.data[feat] = self.data[feat].replace(self.missing_value, np.NaN)
        pd.Series(self.data.columns).apply(lambda x: missing_count(x))
    def label_overview(self):
        """Record per-class label counts when the dataset carries a label column."""
        if self.input_data_label:
            label_name = self.y.columns[0]
            self.summary_dict.update({"label_num": self.y.groupby(label_name)[label_name].count().to_dict()})
    def get_mean(self, df):
        # per-column mean, rounded to 6 decimals
        self.summary_dict["mean"].update(float_transform(df.mean()).to_dict())
    def get_median(self, df):
        self.summary_dict["median"].update(float_transform(df.median()).to_dict())
    def get_min_max(self, df):
        self.summary_dict["min"].update(float_transform(df.min()).to_dict())
        self.summary_dict["max"].update(float_transform(df.max()).to_dict())
    def get_variance(self, df):
        self.summary_dict["variance"].update(float_transform(df.var()).to_dict())
    def get_std(self, df):
        self.summary_dict["std"].update(float_transform(df.std()).to_dict())
    def get_quantile(self, df):
        # quantile levels come from train_params (default quartiles)
        self.summary_dict["quantile"].update(float_transform(df.quantile(self.quantile)).to_dict())
    def get_skewness(self, df):
        self.summary_dict["skewness"].update(float_transform(df.skew()).to_dict())
    def get_kurtosis(self, df):
        self.summary_dict["kurtosis"].update(float_transform(df.kurtosis()).to_dict())
    def fit(self):
        """Run all statistics and optionally dump the summary dict as JSON."""
        self.data_overview()
        self.missing_overview()
        self.label_overview()
        def feat_handle(feat):
            # columns that contained missing values are object dtype after
            # replacement; drop NaNs and convert the remaining strings back
            # to numbers.
            # NOTE(review): eval() on cell contents executes arbitrary input;
            # consider a numeric conversion instead.
            if self.missing_flag[feat]:
                data = pd.DataFrame(self.data[feat].dropna().apply(lambda x: eval(x)))
            else:
                data = self.data[[feat]]
            return data
        def feat_statistic(feat):
            feat_ = feat_handle(feat)
            self.get_mean(feat_)
            self.get_median(feat_)
            self.get_min_max(feat_)
            self.get_variance(feat_)
            self.get_std(feat_)
            self.get_quantile(feat_)
            self.get_skewness(feat_)
            self.get_kurtosis(feat_)
            logger.info("Feature {} calculated!".format(feat))
        pd.Series(self.data.columns).apply(lambda x: feat_statistic(x))
        # save
        if self.output_flag is not None:
            with open(self.output_path_name, "w") as wf:
                json.dump(self.summary_dict, wf)
        ProgressCalculator.finish_progress()
| 7,411 | 41.843931 | 109 | py |
XFL | XFL-master/python/algorithm/framework/local/data_statistic/__init__.py | 0 | 0 | 0 | py | |
XFL | XFL-master/python/algorithm/framework/local/data_statistic/trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .label_trainer import LocalDataStatisticLabelTrainer
LocalDataStatisticTrainer = LocalDataStatisticLabelTrainer
| 725 | 35.3 | 74 | py |
XFL | XFL-master/test/conftest.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import pytest
DATE_FORMAT = '%Y-%m-%d %H:%M:%S'
@pytest.fixture(scope='session', autouse=True)
def timer_session_scope():
    """Session-wide autouse fixture: print start/finish timestamps and total duration."""
    start = time.time()
    print('\nstart: {}'.format(time.strftime(DATE_FORMAT, time.localtime(start))))
    yield
    # everything after the yield runs once the whole session has finished
    finished = time.time()
    print('finished: {}'.format(time.strftime(DATE_FORMAT, time.localtime(finished))))
    print('Total time cost: {:.3f}s'.format(finished - start))
@pytest.fixture(scope='function', autouse=True)
def timer_function_scope():
    """Per-test autouse fixture: print the wall-clock duration of each test."""
    start = time.time()
    yield
    print('Time cost: {:.3f}s'.format(time.time() - start))
@pytest.fixture(scope="session", autouse=True)
def tmp_factory(tmpdir_factory):
p = tmpdir_factory.mktemp("unittest-tmp-dir")
return p
| 1,322 | 29.068182 | 83 | py |
XFL | XFL-master/test/service/test_fed_node.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import unittest.mock as um
import grpc
from common.utils.grpc_channel_options import insecure_options
from service.fed_node import FedNode
class mock_server():
    """Minimal stand-in for a gRPC server: it only records the address
    that ``add_insecure_port`` was asked to bind."""

    def add_insecure_port(self, address):
        """Remember the requested bind address on the instance."""
        setattr(self, "address", address)
class Test_FedNode():
    def test_init_fednode(self, monkeypatch):
        """FedNode.init_fednode reads topology from environment variables
        (task network JSON + debug listening ports) for all three roles."""
        task_network = '''{
            "nodes": {
                "node-1": {
                    "endpoints": [
                        {
                            "fuwuEndpointId": "scheduler-endpoint-1",
                            "url": "localhost:55001"
                        },
                        {
                            "fuwuEndpointId": "trainer-endpoint-1",
                            "url": "localhost:56001"
                        },
                        {
                            "fuwuEndpointId": "assist-trainer-endpoint-1",
                            "url": "localhost:57001"
                        }
                    ],
                    "name": "promoter"
                }
            }
        }'''
        monkeypatch.setenv("__ENIGMA_FEDAPP_LOCAL_TASK_NODE_ID__", "node-1")
        monkeypatch.setenv("__ENIGMA_FEDAPP_TASK_NETWORK__", task_network)
        monkeypatch.setenv(
            "DEBUG_LISTENING_PORT", '{"scheduler": 55001, "assist_trainer":57001, "node-1": 56001}')
        # scheduler role picks up the scheduler debug port
        FedNode.init_fednode("scheduler", "scheduler")
        assert FedNode.node_id == "scheduler"
        assert FedNode.listening_port == 55001
        # assist_trainer role picks up the assist_trainer debug port
        FedNode.init_fednode("assist_trainer", "assist_trainer")
        assert FedNode.node_id == "assist_trainer"
        assert FedNode.listening_port == 57001
        # trainer role: full config dict is derived from the task network
        FedNode.init_fednode("trainer", "node-1")
        assert FedNode.config == {"node_id": "node-1", "scheduler": {"node_id": "node-1", "host": "localhost", "port": "55001", "use_tls": False, 'name': 'promoter'}, "trainer": {
            "node-1": {"host": "localhost", "port": "56001", "use_tls": False, 'name': 'promoter'}, "assist_trainer": {"host": "localhost", "port": "57001", "use_tls": False, 'name': 'promoter'}}}
        assert FedNode.scheduler_host == "localhost"
        assert FedNode.scheduler_port == "55001"
        assert FedNode.trainers == {"node-1": {"host": "localhost", "port": "56001", "use_tls": False, 'name': 'promoter'},
                                    "assist_trainer": {"host": "localhost", "port": "57001", "use_tls": False, 'name': 'promoter'}}
        assert FedNode.listening_port == 56001
def test_init_fednode2(self, tmp_path):
path = tmp_path / "fed_conf_scheduler.json"
fed_conf = {
"fed_info": {
"scheduler": {
"node-1": "localhost:55001"
},
"trainer": {
"node-1": "localhost:56001",
"node-2": "localhost:56002"
},
"assist_trainer": {
"assist_trainer": "localhost:57001"
}
},
"redis_server": "localhost:6379",
"grpc": {
"use_tls": False
}
}
f = open(path, 'w')
json.dump(fed_conf, f)
f.close()
FedNode.init_fednode("scheduler", "scheduler", tmp_path)
assert FedNode.node_id == "scheduler"
assert FedNode.listening_port == '55001'
path2 = tmp_path / "fed_conf.json"
f = open(path2, 'w')
json.dump(fed_conf, f)
f.close()
FedNode.init_fednode("assist_trainer", "assist_trainer", tmp_path)
assert FedNode.node_id == "assist_trainer"
assert FedNode.listening_port == '57001'
FedNode.init_fednode("trainer", "node-1", tmp_path)
assert FedNode.config == {'node_id': 'node-1', 'scheduler': {'node_id': 'node-1', 'host': 'localhost', 'port': '55001', 'use_tls': False},
'trainer': {'assist_trainer': {'node_id': 'assist_trainer', 'host': 'localhost', 'port': '57001', 'use_tls': False, 'name': 'assist_trainer'},
'node-1': {'host': 'localhost', 'port': '56001', 'use_tls': False, 'name': 'node-1'},
'node-2': {'host': 'localhost', 'port': '56002', 'use_tls': False, 'name': 'node-2'}}, 'redis_server': {'host': 'localhost', 'port': '6379'}}
assert FedNode.scheduler_host == "localhost"
assert FedNode.scheduler_port == "55001"
assert FedNode.trainers == {"node-1": {"host": "localhost", "port": "56001", "use_tls": False, 'name': 'node-1'},
'node-2': {'host': 'localhost', 'port': '56002', 'use_tls': False, 'name': 'node-2'},
"assist_trainer": {"host": "localhost", 'node_id': 'assist_trainer', "port": "57001", "use_tls": False, 'name': 'assist_trainer'}}
assert FedNode.listening_port == '56001'
def test_add_server(self, mocker):
server = mock_server()
mocker.patch.object(FedNode, 'listening_port', 55001)
spy_add_server = mocker.spy(FedNode, 'add_server')
FedNode.add_server(server)
assert server.address == "[::]:55001"
def test_create_channel(self, mocker):
mocker.patch.object(FedNode, 'scheduler_host', "localhost")
mocker.patch.object(FedNode, 'scheduler_port', "55001")
mocker.patch.object(FedNode, 'config', {"scheduler": {
"node_id": "node-1", "host": "localhost", "port": "55001", "use_tls": False}})
mocker.patch.object(FedNode, 'trainers', {"node-1": {"host": "localhost", "port": "56001",
"use_tls": True}, "assist_trainer": {"host": "localhost", "port": "57001", "use_tls": False}})
mocker.patch("grpc.secure_channel", return_value="secure_channel")
mocker.patch("grpc.insecure_channel", return_value="insecure_channel")
mocker.patch("grpc.intercept_channel",
return_value='intercept_channel')
channel = FedNode.create_channel("node-1")
assert FedNode.channels["node-1"] == "intercept_channel"
channel = FedNode.create_channel("scheduler")
assert FedNode.channels["scheduler"] == "intercept_channel"
grpc.insecure_channel.assert_called_once_with(
"localhost:55001", options=insecure_options)
def test_load_root_certificates(self, mocker):
mocker.patch("os.getcwd", return_value=os.path.join(
os.getcwd(), 'python'))
mocker.patch('builtins.open', um.mock_open(read_data=b"1"))
root_certificates = FedNode.load_root_certificates()
assert root_certificates == b"11"
def test_load_client_cert(self, mocker):
mocker.patch.object(FedNode, "config", {
"cert": {"client.key": "test", "client.crt": "test", "ca.crt": "test"}})
mocker.patch('builtins.open', um.mock_open(read_data='test'))
private_key, certificate_chain, root_certificates = FedNode.load_client_cert()
assert private_key == 'test'
assert certificate_chain == 'test'
assert root_certificates == 'test'
| 7,796 | 46.254545 | 196 | py |
XFL | XFL-master/test/service/test_fed_job.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from common.storage.redis.redis_conn import RedisConn
from common.communication.gRPC.python import status_pb2
from service.fed_job import FedJob
class mock_model(object):
    """Stub operator class returned by the patched get_operator factory."""

    def __init__(self, stage_config):
        """Accept the stage configuration and deliberately ignore it."""
class Test_FedJob():
    """Tests for FedJob: job-id bootstrap, progress init and model lookup."""
    def test_init_fedjob(self, mocker):
        # init_fedjob should read the current job id from redis key XFL_JOB_ID.
        mocker.patch.object(RedisConn, 'get', return_value=1)
        FedJob.init_fedjob()
        RedisConn.get.assert_called_once_with("XFL_JOB_ID")
        assert FedJob.job_id == 1
    def test_init_progress(self):
        # init_progress(n) should allocate one zeroed progress slot per stage.
        FedJob.init_progress(2)
        assert FedJob.total_stage_num == 2
        assert FedJob.progress == [0, 0]
    def test_get_model(self, mocker):
        # mock get_operator so no real operator class is resolved; get_model
        # should instantiate whatever class the factory returns.
        mocker.patch('service.fed_job.get_operator', return_value=mock_model)
        model = FedJob.get_model("trainer", {"model_info": {"name": "VerticalKmeansTrainer"}})
        assert isinstance(model, mock_model)
| 1,527 | 33.727273 | 94 | py |
XFL | XFL-master/test/service/test_service_scheduler.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from concurrent import futures
import grpc
import pytest
from google.protobuf import text_format
from google.protobuf import json_format
import service.scheduler
from common.communication.gRPC.python import (checker_pb2, commu_pb2,
control_pb2, scheduler_pb2,
scheduler_pb2_grpc, status_pb2)
from common.storage.redis.redis_conn import RedisConn
from common.utils.config_parser import replace_variable
from common.utils.grpc_channel_options import insecure_options
from common.utils.logger import get_node_log_path, get_stage_node_log_path
from service.fed_config import FedConfig
from service.fed_job import FedJob
from service.fed_node import FedNode
from service.scheduler import SchedulerService
# Address and port of the in-process scheduler gRPC server started by the
# module-scoped fixture below; shared by every test in this module.
host = 'localhost'
listening_port = 55001
@pytest.fixture(scope='module', autouse=True)
def start_scheduler():
    # Start the scheduler gRPC server once for the whole module and tear it
    # down after the last test; autouse=True means no test must request it.
    server = grpc.server(futures.ThreadPoolExecutor(
        max_workers=10), options=insecure_options)
    scheduler_pb2_grpc.add_SchedulerServicer_to_server(
        SchedulerService(is_bar=True), server)
    server.add_insecure_port(f"[::]:{listening_port}")
    server.start()
    yield
    # Immediate (non-graceful) shutdown after the module finishes.
    server.stop(None)
@pytest.fixture()
def start_client():
    """Provide a SchedulerStub wired to the in-process scheduler server."""
    # Construct the stub inline over a fresh insecure channel per test.
    return scheduler_pb2_grpc.SchedulerStub(
        grpc.insecure_channel(f"{host}:{listening_port}", options=insecure_options))
def yield_post_request():
    """Generate three PostRequest chunks that share one redis key.

    Payloads are 1, 2 and 3 zero bytes, so the server-side concatenation
    is 6 zero bytes in total.
    """
    for payload in (bytes(1), bytes(2), bytes(3)):
        yield commu_pb2.PostRequest(key='test~test_channel_1~1', value=payload)
class TestSchedulerService():
    """End-to-end tests for SchedulerService, exercised through a real gRPC
    channel (start_client fixture) to the in-process server."""
    def test_post(self, start_client, mocker):
        # mock redis service so nothing is actually persisted
        mocker.patch.object(RedisConn, 'put')
        response = start_client.post(yield_post_request())
        assert response == commu_pb2.PostResponse(code=0)
        # The three streamed chunks (1 + 2 + 3 zero bytes) are concatenated
        # and stored under the shared key with a single redis put.
        request_key = 'test~test_channel_1~1'
        RedisConn.put.assert_called_once_with(request_key, bytes(6))
    @pytest.mark.parametrize('nodeId, config', [('node-1', {0: {'node-1': {'trainer': 'test'}, 'node-2': {'label_trainer': 'test'}}})])
    def test_getConfig(self, start_client, nodeId, config, mocker):
        # getConfig should return this node's slice of the current stage's
        # trainer config, serialised as JSON.
        mocker.patch.object(FedConfig, 'trainer_config', config)
        mocker.patch.object(FedJob, 'current_stage', 0)
        mocker.patch.object(FedJob, 'job_id', 0)
        request = scheduler_pb2.GetConfigRequest(nodeId=nodeId)
        response = start_client.getConfig(request)
        assert response == scheduler_pb2.GetConfigResponse(
            config=json.dumps(config[0][nodeId]), code=0, jobId=0)
    def test_control(self, start_client, mocker):
        # STOP: the scheduler stops itself and forwards STOP to the trainers,
        # prefixing its own message to the (mocked) trainer response.
        mocker.patch('service.scheduler.trainer_control',
                     return_value=control_pb2.ControlResponse(code=1, message='test'))
        mocker.patch.object(FedJob, 'job_id', 1)
        request = control_pb2.ControlRequest(control=control_pb2.STOP)
        response = start_client.control(request)
        service.scheduler.trainer_control.assert_called_once_with(
            control_pb2.STOP)
        assert response == control_pb2.ControlResponse(
            code=1, message='Stop Scheduler Successful.\n'+'test', jobId=1, nodeLogPath={}, stageNodeLogPath={})
        # START while the scheduler is still in STOP_TRAIN: refused.
        mocker.patch.object(FedJob, 'job_id', 1)
        mocker.patch.object(FedJob, 'status', status_pb2.STOP_TRAIN)
        request = control_pb2.ControlRequest(control=control_pb2.START)
        response = start_client.control(request)
        assert response == control_pb2.ControlResponse(
            code=1, message="Scheduler not ready.", jobId=1, nodeLogPath={}, stageNodeLogPath={})
        # START while a trainer is still TRAINING: refused, naming the node.
        mocker.patch.object(FedJob, 'status', status_pb2.IDLE)
        mocker.patch('service.scheduler.get_trainer_status', return_value={
                     'node-1': status_pb2.Status(code=2, status='TRAINING')})
        request = control_pb2.ControlRequest(control=control_pb2.START)
        response = start_client.control(request)
        service.scheduler.get_trainer_status.assert_called()
        assert response == control_pb2.ControlResponse(
            code=1, message="Trainer node-1 not ready..", jobId=1, nodeLogPath={}, stageNodeLogPath={})
        # START with all trainers in a terminal state: a new job id is drawn
        # from redis (incr -> 2) and its status is set to TRAINING.
        mocker.patch('service.scheduler.get_trainer_status', return_value={
                     'node-1': status_pb2.Status(code=4, status='FAILED')})
        mocker.patch.object(RedisConn, 'incr', return_value=2)
        mocker.patch.object(RedisConn, 'set')
        request = control_pb2.ControlRequest(control=control_pb2.START)
        response = start_client.control(request)
        RedisConn.incr.assert_called_once_with('XFL_JOB_ID')
        RedisConn.set.assert_called_once_with(
            "XFL_JOB_STATUS_2", status_pb2.TRAINING)
        job_log_path = get_node_log_path(job_id=FedJob.job_id, node_ids=list(FedNode.trainers.keys()) + ['scheduler'])
        job_stages_log_path = get_stage_node_log_path(job_id=FedJob.job_id, train_conf=FedConfig.converted_trainer_config)
        # if not FedConfig.trainer_config:
        #     job_log_path = {}
        #     job_stages_log_path = {}
        # assert response == control_pb2.ControlResponse(
        #     code=0, message="", jobId=2, nodeLogPath=json.dumps(job_log_path), stageNodeLogPath=json.dumps(job_stages_log_path))
        # assert FedJob.status == status_pb2.TRAINING
    def test_recProgress(self, start_client, mocker):
        # recProgress should record the reported per-stage progress.
        # mocker.patch.object(FedJob, 'progress', {0: 0})
        # request = scheduler_pb2.RecProgressRequest(stageId=0, progress=10)
        # response = start_client.recProgress(request)
        # assert response == scheduler_pb2.RecProgressResponse(code=0)
        # assert FedJob.progress[0] == 10
        mocker.patch.object(FedJob, 'job_id', 2)
        mocker.patch.object(FedJob, 'current_stage', 0)
        mocker.patch.object(FedJob, 'total_stage_num', 1)
        mocker.patch.object(FedJob, 'progress', {0: 0})
        mocker.patch.object(FedConfig, 'trainer_config', {
            0: {'trainer': {'model_info': {'name': 'test'}}}})
        mocker.patch.object(RedisConn, 'set', return_value=None)
        request = scheduler_pb2.RecProgressRequest(stageId=0, progress=10)
        response = start_client.recProgress(request)
        assert response == scheduler_pb2.RecProgressResponse(code=0)
        assert FedJob.progress[0] == 10
    def test_status(self, start_client, mocker):
        # Current node status: jobId=0 asks for the live scheduler/trainer
        # status rather than a stored job status.
        mocker.patch.object(FedJob, 'job_id', 2)
        mocker.patch.object(FedJob, 'status', 2)
        mocker.patch('service.scheduler.get_trainer_status', return_value={
                     'node-1': status_pb2.Status(code=2, status='TRAINING')})
        request = status_pb2.StatusRequest(jobId=0)
        response = start_client.status(request)
        assert response.schedulerStatus == status_pb2.Status(
            code=2, status='TRAINING')
        service.scheduler.get_trainer_status.assert_called()
        assert response.trainerStatus == {
            'node-1': status_pb2.Status(code=2, status='TRAINING')}
        assert response.jobId == 2
        # request = status_pb2.StatusRequest(jobId=2)
        # response = start_client.status(request)
        # assert response.jobStatus == status_pb2.Status(
        #     code=2, status='TRAINING')
        # assert response.jobId == 2
        # Historical job status is looked up in redis (mocked here).
        mocker.patch.object(
            RedisConn, 'get', return_value=status_pb2.SUCCESSFUL)
        request = status_pb2.StatusRequest(jobId=1)
        response = start_client.status(request)
        # RedisConn.get.assert_called_once_with("XFL_JOB_STATUS_1")
        assert response.jobStatus == status_pb2.Status(
            code=3, status='SUCCESSFUL')
        mocker.patch.object(RedisConn, 'get', return_value=status_pb2.FAILED)
        request = status_pb2.StatusRequest(jobId=1)
        response = start_client.status(request)
        # RedisConn.get.assert_called_once_with("XFL_JOB_STATUS_1")
        assert response.jobStatus == status_pb2.Status(code=4, status='FAILED')
    @pytest.mark.parametrize('algo, config',
                             [
                                 ('vertical_xgboost', {
                                  "trainer": 'test', "label_trainer": 'test'}),
                                 ('local_normalization', {
                                  "trainer": 'test', "label_trainer": 'test'})
                             ])
    def test_getAlgorithmList(self, start_client, algo, config, mocker):
        # getAlgorithmList should return the algorithm names plus, per
        # algorithm, the (json.dumps-mocked) default config of each role.
        mocker.patch.object(FedConfig, 'algorithm_list', [
                            'vertical_xgboost', 'local_normalization'])
        mocker.patch.object(FedConfig, 'default_config_map', {'vertical_xgboost': {'trainer': {'info': 'test'}, 'label_trainer': {
            'info': 'test'}}, 'local_normalization': {'trainer': {'info': 'test'}, 'label_trainer': {'info': 'test'}}})
        mocker.patch.object(json, 'dumps', return_value='test')
        request = scheduler_pb2.GetAlgorithmListRequest()
        response = start_client.getAlgorithmList(request)
        assert response.algorithmList == [
            'vertical_xgboost', 'local_normalization']
        assert response.defaultConfigMap[algo] == scheduler_pb2.DefaultConfig(
            config=config)
    def test_getStage(self, start_client, mocker):
        # getStage should replay the GetStageResponse that was serialised
        # into redis (json_format.MessageToJson round-trip).
        mocker.patch.object(FedJob, 'current_stage', 2)
        mocker.patch.object(FedJob, 'total_stage_num', 3)
        progress = {0: 100, 1: 45}
        mocker.patch.object(FedJob, 'progress', progress)
        stage_response = scheduler_pb2.GetStageResponse()
        stage_name = "test"
        stage_response.code = 0
        stage_response.currentStageId = 1
        stage_response.totalStageNum = 3
        stage_response.currentStageName = stage_name
        bar_response = scheduler_pb2.ProgressBar()
        for stage, progress in progress.items():
            bar_response.stageId = stage
            bar_response.stageProgress = progress
            stage_response.progressBar.append(bar_response)
        mocker.patch.object(RedisConn, 'get', return_value=json_format.MessageToJson(stage_response))
        request = scheduler_pb2.GetStageRequest()
        request.jobId = 0
        response = start_client.getStage(request)
        assert response.code == 0
        assert response.currentStageId == 1
        assert response.totalStageNum == 3
        assert response.currentStageName == 'test'
        assert response.progressBar[0].stageId == 0
        assert response.progressBar[0].stageProgress == 100
        assert response.progressBar[1].stageId == 1
        assert response.progressBar[1].stageProgress == 45
        # Missing redis entry -> code 3.
        mocker.patch.object(RedisConn, 'get', return_value=None)
        request = scheduler_pb2.GetStageRequest()
        request.jobId = 0
        response = start_client.getStage(request)
        assert response.code == 3
        # mocker.patch.object(FedJob, 'current_stage', 0)
        # mocker.patch.object(FedJob, 'total_stage_num', 1)
        # mocker.patch.object(FedJob, 'progress', {0: 0})
        # mocker.patch.object(FedConfig, 'trainer_config', {
        #     0: {'trainer': {'model_info': {'name': 'test'}}}})
        # request = scheduler_pb2.GetStageRequest()
        # response = start_client.getStage(request)
        # assert response.code == 0
        # assert response.currentStageId == 0
        # assert response.totalStageNum == 1
        # assert response.currentStageName == 'test'
        # assert response.progressBar[0].stageId == 0
        # assert response.progressBar[0].stageProgress == 0
        # mocker.patch.object(FedConfig, 'trainer_config', {0: {}})
        # request = scheduler_pb2.GetStageRequest()
        # response = start_client.getStage(request)
        # assert response.code == 1
        # assert response.currentStageName == ''
        # mocker.patch.object(FedConfig, 'trainer_config', [])
        # request = scheduler_pb2.GetStageRequest()
        # response = start_client.getStage(request)
        # assert response.code == 2
        # assert response.currentStageName == ''
    def test_checkTaskConfig(self, start_client, mocker):
        # Validate a six-stage training pipeline config. The [JOB_ID],
        # [NODE_ID] and [STAGE_ID(-k)] placeholders are resolved by the
        # checker; the expected findings are asserted at the bottom.
        request = checker_pb2.CheckTaskConfigRequest()
        conf = \
            [
                {
                    "identity": "label_trainer",
                    "model_info": {
                        # "name": "vertical_binning_woe_iv_fintech"
                        "name": "vertical_logistic_regression"
                    },
                    "input": {
                        "trainset": [
                            {
                                "type": "csv",
                                "path": "/opt/dataset/testing/fintech",
                                "name": "banking_guest_train_v01_20220216_TL.csv",
                                "has_id": True,
                                "has_label": True,
                                "nan_list": [
                                ]
                            }
                        ]
                    },
                    "output": {
                        "path": "/opt/checkpoints/[JOB_ID]/[NODE_ID]",
                        "model": {
                            "name": "vertical_binning_woe_iv_[STAGE_ID].json"
                        },
                        "iv": {
                            "name": "woe_iv_result_[STAGE_ID].json"
                        },
                        "split_points": {
                            "name": "binning_split_points_[STAGE_ID].json"
                        },
                        "trainset": {
                            "name": "fintech_woe_map_train_[STAGE_ID].csv"
                        }
                    },
                    "train_info": {
                        "interaction_params": {
                            "save_model": True
                        },
                        "train_params": {
                            "encryption": {
                                "paillier": {
                                    "key_bit_size": 2048,
                                    "precision": 7,
                                    "djn_on": True,
                                    "parallelize_on": True
                                }
                            },
                            "binning": {
                                "method": "equal_width",
                                "bins": 5
                            }
                        }
                    }
                },
                {
                    "identity": "label_trainer",
                    "model_info": {
                        # "name": "vertical_feature_selection"
                        "name": "vertical_logistic_regression"
                    },
                    "input": {
                        "iv_result": {
                            "path": "/opt/checkpoints/[JOB_ID]/[NODE_ID]",
                            "name": "woe_iv_result_[STAGE_ID-1].json"
                        },
                        "trainset": [
                            {
                                "type": "csv",
                                "path": "/opt/dataset/testing/fintech",
                                "name": "banking_guest_train_v01_20220216_TL.csv",
                                "has_id": True,
                                "has_label": True
                            }
                        ],
                        "valset": [
                            {
                                "type": "csv",
                                "path": "/opt/dataset/testing/fintech",
                                "name": "banking_guest_train_v01_20220216_TL.csv",
                                "has_id": True,
                                "has_label": True
                            }
                        ]
                    },
                    "output": {
                        "path": "/opt/checkpoints/[JOB_ID]/[NODE_ID]",
                        "model": {
                            "name": "feature_selection_[STAGE_ID].pkl"
                        },
                        "trainset": {
                            "name": "selected_train_[STAGE_ID].csv"
                        },
                        "valset": {
                            "name": "selected_val_[STAGE_ID].csv"
                        }
                    },
                    "train_info": {
                        "train_params": {
                            "filter": {
                                "common": {
                                    "metrics": "iv",
                                    "filter_method": "threshold",
                                    "threshold": 0.01
                                }
                            }
                        }
                    }
                },
                {
                    "identity": "label_trainer",
                    "model_info": {
                        # "name": "vertical_pearson"
                        "name": "vertical_logistic_regression"
                    },
                    "input": {
                        "trainset": [
                            {
                                "type": "csv",
                                "path": "/opt/checkpoints/[JOB_ID]/[NODE_ID]",
                                "name": "selected_train_[STAGE_ID-1].csv",
                                "has_id": True,
                                "has_label": True
                            }
                        ]
                    },
                    "output": {
                        "path": "/opt/checkpoints/[JOB_ID]/[NODE_ID]",
                        "corr": {
                            "name": "vertical_pearson_[STAGE_ID].pkl"
                        }
                    },
                    "train_info": {
                        "train_params": {
                            "col_index": -1,
                            "col_names": "",
                            "encryption": {
                                "paillier": {
                                    "key_bit_size": 2048,
                                    "precision": 6,
                                    "djn_on": True,
                                    "parallelize_on": True
                                }
                            },
                            "max_num_cores": 999,
                            "sample_size": 9999
                        }
                    }
                },
                {
                    "identity": "label_trainer",
                    "model_info": {
                        # "name": "vertical_feature_selection"
                        "name": "vertical_logistic_regression"
                    },
                    "input": {
                        "corr_result": {
                            "path": "/opt/checkpoints/[JOB_ID]/[NODE_ID]",
                            "name": "vertical_pearson_[STAGE_ID-1].pkl"
                        },
                        "iv_result": {
                            "path": "/opt/checkpoints/[JOB_ID]/[NODE_ID]",
                            # [STAGE_ID] here points at a file no stage produces,
                            # triggering the nonexistentInput finding for stage 3.
                            "name": "woe_iv_result_[STAGE_ID].json" # "name": "woe_iv_result_[STAGE_ID-3].json"
                        },
                        "trainset": [
                            {
                                "type": "csv",
                                "path": "/opt/checkpoints/[JOB_ID]/[NODE_ID]",
                                "name": "selected_train_[STAGE_ID-2].csv",
                                "has_id": True,
                                "has_label": True
                            }
                        ],
                        "valset": [
                            {
                                "type": "csv",
                                "path": "/opt/checkpoints/[JOB_ID]/[NODE_ID]",
                                "name": "selected_val_[STAGE_ID-2].csv",
                                "has_id": True,
                                "has_label": True
                            }
                        ]
                    },
                    "output": {
                        "path": "/opt/checkpoints/[JOB_ID]/[NODE_ID]",
                        "model": {
                            "name": "feature_selection_[STAGE_ID].pkl"
                        },
                        "trainset": {
                            "name": "selected_train_[STAGE_ID].csv"
                        },
                        "valset": {
                            "name": "selected_val_[STAGE_ID].csv"
                        }
                    },
                    "train_info": {
                        "train_params": {
                            "filter": {
                                "common": {
                                    "metrics": "iv",
                                    "filter_method": "threshold",
                                    "threshold": 0.01
                                },
                                "correlation": {
                                    "sort_metric": "iv",
                                    "correlation_threshold": 0.7
                                }
                            }
                        }
                    }
                },
                {
                    "identity": "label_trainer",
                    "model_info": {
                        # "name": "local_normalization"
                        "name": "vertical_logistic_regression"
                    },
                    "input": {
                        "trainset": [
                            {
                                "type": "csv",
                                "path": "/opt/checkpoints/[JOB_ID]/[NODE_ID]",
                                "name": "selected_train_[STAGE_ID-1].csv",
                                "has_id": True,
                                "has_label": True
                            }
                        ],
                        "valset": [
                            {
                                "type": "csv",
                                "path": "/opt/checkpoints/[JOB_ID]/[NODE_ID]",
                                "name": "selected_val_[STAGE_ID-1].csv",
                                "has_id": True,
                                "has_label": True
                            }
                        ]
                    },
                    "output": {
                        "path": "/opt/checkpoints/[JOB_ID]/[NODE_ID]",
                        "model": {
                            # "name": "local_normalization_[STAGE_ID].pt"
                            "name": "vertical_logitstic_regression_[STAGE_ID].pt"
                        },
                        "trainset": {
                            "name": "normalized_train_[STAGE_ID].csv"
                        },
                        "valset": {
                            "name": "normalized_val_[STAGE_ID].csv"
                        }
                    },
                    "train_info": {
                        "train_params": {
                            "norm": "max",
                            "axis": 0
                        }
                    }
                },
                {
                    "identity": "label_trainer",
                    "model_info": {
                        "name": "vertical_logistic_regression"
                    },
                    "input": {
                        "trainset": [
                            {
                                "type": "csv",
                                "path": "/opt/checkpoints/[JOB_ID]/[NODE_ID]",
                                "name": "normalized_train_[STAGE_ID-1].csv",
                                "has_id": True,
                                "has_label": True
                            }
                        ],
                        "valset": [
                            {
                                "type": "csv",
                                "path": "/opt/checkpoints/[JOB_ID]/[NODE_ID]",
                                "name": "normalized_val_[STAGE_ID-1].csv",
                                "has_id": True,
                                "has_label": True
                            }
                        ],
                        # Blank path/name trigger the blankInputOutput and
                        # nonexistentInput findings for this stage.
                        "pretrained_model": {
                            "path": "",
                            "name": ""
                        }
                    },
                    "output": {
                        "path": "/opt/checkpoints/[JOB_ID]/[NODE_ID]",
                        "model": {
                            # "name": "vertical_logitstic_regression_[STAGE_ID].pt"
                            # [STAGE_ID - 1] collides with the previous stage's
                            # model file, triggering duplicatedInputOutput.
                            "name": "vertical_logitstic_regression_[STAGE_ID - 1].pt"
                        },
                        "metric_train": {
                            "name": "lr_metric_train_[STAGE_ID].csv"
                        },
                        "metric_val": {
                            "name": "lr_metric_val_[STAGE_ID].csv"
                        },
                        "prediction_train": {
                            "name": "lr_prediction_train_[STAGE_ID].csv"
                        },
                        "prediction_val": {
                            "name": "lr_prediction_val_[STAGE_ID].csv"
                        },
                        "ks_plot_train": {
                            "name": "lr_ks_plot_train_[STAGE_ID].csv"
                        },
                        "ks_plot_val": {
                            "name": "lr_ks_plot_val_[STAGE_ID].csv"
                        },
                        "decision_table_train": {
                            "name": "lr_decision_table_train_[STAGE_ID].csv"
                        },
                        "decision_table_val": {
                            "name": "lr_decision_table_val_[STAGE_ID].csv"
                        },
                        "feature_importance": {
                            "name": "lr_feature_importance_[STAGE_ID].csv"
                        }
                    },
                    "train_info": {
                        "interaction_params": {
                            "save_frequency": -1,
                            "write_training_prediction": True,
                            "write_validation_prediction": True,
                            "echo_training_metrics": True
                        },
                        "train_params": {
                            "global_epoch": 2,
                            "batch_size": 512,
                            "encryption": {
                                "ckks": {
                                    "poly_modulus_degree": 8192,
                                    "coeff_mod_bit_sizes": [
                                        60,
                                        40,
                                        40,
                                        60
                                    ],
                                    "global_scale_bit_size": 40
                                }
                            },
                            "optimizer": {
                                "lr": 0.01,
                                "p": 2,
                                "alpha": 1e-4
                            },
                            "metric": {
                                "decision_table": {
                                    "method": "equal_frequency",
                                    "bins": 10
                                },
                                "acc": {},
                                "precision": {},
                                "recall": {},
                                "f1_score": {},
                                "auc": {},
                                "ks": {}
                            },
                            "early_stopping": {
                                "key": "acc",
                                "patience": 10,
                                "delta": 0
                            },
                            "random_seed": 50
                        }
                    }
                }
            ]
        request.dumpedTrainConfig = json.dumps(conf)
        # request.existedInputPath.append()
        response = start_client.checkTaskConfig(request)
        # print("-------")
        # print(text_format.MessageToString(response.multiStageResult))
        # print(response.message)
        # print(response.code)
        # print(response)
        m = text_format.MessageToString(response.crossStageResult)
        # Compare the textual proto whitespace-insensitively against the
        # expected cross-stage findings (stage ids are 0-based).
        assert m.replace(' ', '').replace('\n', '') == '''
            duplicatedInputOutput {
                dumpedValue: "\\"/opt/checkpoints/JOB_ID/NODE_ID/vertical_logitstic_regression_4.pt\\""
                positionList {
                    stageId: 4
                    pathInfo {
                        dictPath {
                            key: "model"
                        }
                    }
                }
                positionList {
                    stageId: 5
                    pathInfo {
                        dictPath {
                            key: "model"
                        }
                    }
                }
            }
            blankInputOutput {
                dumpedValue: "\\"\\""
                positionList {
                    stageId: 5
                    pathInfo {
                        dictPath {
                            key: "pretrained_model"
                        }
                    }
                }
            }
            nonexistentInput {
                dumpedValue: "\\"/opt/dataset/testing/fintech/banking_guest_train_v01_20220216_TL.csv\\""
                positionList {
                    pathInfo {
                        dictPath {
                            key: "trainset"
                        }
                    }
                }
            }
            nonexistentInput {
                dumpedValue: "\\"/opt/dataset/testing/fintech/banking_guest_train_v01_20220216_TL.csv\\""
                positionList {
                    stageId: 1
                    pathInfo {
                        dictPath {
                            key: "trainset"
                        }
                    }
                }
            }
            nonexistentInput {
                dumpedValue: "\\"/opt/dataset/testing/fintech/banking_guest_train_v01_20220216_TL.csv\\""
                positionList {
                    stageId: 1
                    pathInfo {
                        dictPath {
                            key: "valset"
                        }
                    }
                }
            }
            nonexistentInput {
                dumpedValue: "\\"/opt/checkpoints/JOB_ID/NODE_ID/woe_iv_result_3.json\\""
                positionList {
                    stageId: 3
                    pathInfo {
                        dictPath {
                            key: "iv_result"
                        }
                    }
                }
            }
            nonexistentInput {
                dumpedValue: "\\"\\""
                positionList {
                    stageId: 5
                    pathInfo {
                        dictPath {
                            key: "pretrained_model"
                        }
                    }
                }
            }
        '''.replace(' ', '').replace('\n', '')
| 31,059 | 40.303191 | 136 | py |
XFL | XFL-master/test/service/test_fed_control.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from common.communication.gRPC.python import control_pb2, status_pb2, scheduler_pb2
from service import fed_control
from service.fed_node import FedNode
from service.fed_job import FedJob
def test_update_progress(mocker):
    """ProgressCalculator should report progress to the scheduler stub
    without error (smoke test: the recProgress RPC is fully mocked)."""
    mocker.patch("service.fed_control.scheduler_pb2_grpc.SchedulerStub.__init__", side_effect=lambda x:None)
    mocker.patch("service.fed_control.scheduler_pb2_grpc.SchedulerStub.recProgress", create=True, return_value=scheduler_pb2.RecProgressResponse(code=0))
    mocker.patch.object(FedNode, 'create_channel', return_value='56001')
    mocker.patch.object(FedJob, 'current_stage', 0)
    # No asserts: exercising both the custom and the finishing code paths.
    a = fed_control.ProgressCalculator(5, 5, 5)
    a.cal_custom_progress(1, 1, 1)
    a.finish_progress()
def test_trainer_control(mocker):
    """trainer_control should summarise per-trainer success/failure.

    code==0 from a trainer counts as success; code!=0 or a raised
    exception is reported as a failure for that trainer.
    """
    mocker.patch.object(FedNode, "create_channel", return_value='56001')
    mocker.patch("service.fed_control.trainer_pb2_grpc.TrainerStub.__init__", side_effect=lambda x:None)
    # Trainer replies code=0 -> success message.
    mocker.patch("service.fed_control.trainer_pb2_grpc.TrainerStub.control", create=True, return_value=control_pb2.ControlResponse(code=0,message='test\n'))
    mocker.patch.object(FedNode, 'trainers', {"node-1":"test"})
    resp = fed_control.trainer_control(control_pb2.STOP)
    assert resp.message == "STOP Trainer: node-1 Successful.\n"
    # Trainer replies code=1 -> failure message.
    mocker.patch("service.fed_control.trainer_pb2_grpc.TrainerStub.control", create=True, return_value=control_pb2.ControlResponse(code=1,message='test\n'))
    resp = fed_control.trainer_control(control_pb2.START)
    assert resp.message == "START Trainer: node-1 Failed.\n"
    # test exception: an RPC error is swallowed and reported as a failure.
    mocker.patch("service.fed_control.trainer_pb2_grpc.TrainerStub.control", create=True, side_effect=Exception)
    resp = fed_control.trainer_control(control_pb2.START)
    assert resp.message == "START Trainer: node-1 Failed.\n"
def test_get_trainer_status(mocker):
    """get_trainer_status should collect each trainer's Status, mapping an
    RPC exception to a placeholder Status(code=-1, status='')."""
    mocker.patch.object(FedNode, "create_channel", return_value='56001')
    mocker.patch.object(FedNode, 'trainers', {"node-1":"test"})
    mocker.patch("service.fed_control.trainer_pb2_grpc.TrainerStub.__init__", side_effect=lambda x:None)
    mocker.patch("service.fed_control.trainer_pb2_grpc.TrainerStub.status", create=True, return_value=status_pb2.StatusResponse(trainerStatus={"node-1":status_pb2.Status(code=1,status='IDLE')}))
    resp = fed_control.get_trainer_status()
    assert resp == {"node-1":status_pb2.Status(code=1,status='IDLE')}
    # test exception: status RPC failure yields the sentinel status.
    mocker.patch("service.fed_control.trainer_pb2_grpc.TrainerStub.status", create=True, side_effect=Exception)
    resp = fed_control.get_trainer_status()
    assert resp == {"node-1":status_pb2.Status(code=-1,status='')}
| 3,258 | 48.378788 | 194 | py |
XFL | XFL-master/test/service/test_fed_config.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from common.communication.gRPC.python import scheduler_pb2
import service
from service.fed_config import FedConfig
from service.fed_job import FedJob
from service.fed_node import FedNode
class Test_FedConfig():
    @pytest.mark.parametrize('trainer_list, result',
                             [
                                 (['test_1', 'test_2'], ['test_1', 'test_2']),
                                 ([], [])
                             ])
    def test_get_label_trainer(self, mocker, trainer_list, result):
        """get_label_trainer should return fed_info.label_trainer verbatim."""
        mocker.patch.object(FedConfig, 'stage_config', {
            "fed_info": {"label_trainer": trainer_list}})
        res = FedConfig.get_label_trainer()
        assert res == result
    @pytest.mark.parametrize('trainer_list, result',
                             [
                                 (['test_1', 'test_2'], 'test_1'),
                                 ([], None)
                             ])
    def test_get_assist_trainer(self, mocker, trainer_list, result):
        """get_assist_trainer should return the first assist trainer, or
        None when the list is empty (single value, unlike the other getters)."""
        mocker.patch.object(FedConfig, 'stage_config', {
            "fed_info": {"assist_trainer": trainer_list}})
        res = FedConfig.get_assist_trainer()
        assert res == result
    @pytest.mark.parametrize('trainer_list, result',
                             [
                                 (['test_1', 'test_2'], ['test_1', 'test_2']),
                                 ([], [])
                             ])
    def test_get_trainer(self, mocker, trainer_list, result):
        """get_trainer should return fed_info.trainer verbatim."""
        mocker.patch.object(FedConfig, 'stage_config', {
            "fed_info": {"trainer": trainer_list}})
        res = FedConfig.get_trainer()
        assert res == result
    def test_load_config(self, mocker):
        """load_config should install the per-job log handler and store the
        trainer config produced by load_trainer_config."""
        mocker.patch.object(FedJob, 'job_id', 1)
        mocker.patch('service.fed_config.add_job_log_handler')
        mocker.patch.object(FedConfig, 'load_trainer_config', return_value={})
        FedConfig.load_config('test')
        # Handler is keyed by job id; second arg is the (empty) log path here.
        service.fed_config.add_job_log_handler.assert_called_once_with(1, '')
        assert FedConfig.trainer_config == {}
    def test_load_trainer_config(self, mocker):
        """load_trainer_config across several node/config-file layouts.

        os.path.exists and load_json_config are mocked per case; the
        fed_info block is expected to be synthesised from the identities
        found in the per-node config files.
        """
        # Case 1: single trainer node with one config file present.
        mocker.patch.object(FedNode, 'trainers', {"node-1": "test"})
        mocker.patch('service.fed_config.load_json_config',
                     return_value=[{"identity": "trainer"}])
        mocker.patch("os.path.exists", return_value=True)
        trainer_config = FedConfig.load_trainer_config("test")
        assert trainer_config == {0: {"node-1": {"identity": "trainer", "fed_info": {
            "label_trainer": [],
            "trainer": ["node-1"],
            "assist_trainer": []
        }}}}
        # Case 2: trainer + assist_trainer; the assist config file exists but
        # is empty, so the assist node only gets the synthesised fed_info.
        mocker.patch.object(FedNode, 'trainers', {"node-1": "test", "assist_trainer": "test2"})
        def mock_load_json_config(*args, **kwargs):
            if load_json_config.call_count == 1:
                return [{
                    "identity": "trainer",
                    "model_info": {
                        "name": "vertical_xgboost"
                    }
                }]
            elif load_json_config.call_count == 2:
                return [{}]
        load_json_config = mocker.patch('service.fed_config.load_json_config', side_effect=mock_load_json_config)
        def mock_func(*args, **kwargs):
            # Alternate True/False so only every other config path "exists".
            if os_path.call_count % 2 == 1:
                return True
            elif os_path.call_count % 2 == 0:
                return False
        os_path = mocker.patch("os.path.exists", side_effect=mock_func)
        trainer_config = FedConfig.load_trainer_config("test")
        assert trainer_config == \
            {
                0: {
                    'node-1': {
                        'identity': 'trainer',
                        'model_info': {
                            'name': 'vertical_xgboost'
                        },
                        'fed_info': {
                            'label_trainer': [],
                            'trainer': ['node-1'],
                            'assist_trainer': []
                        }
                    },
                    'assist_trainer': {
                        'fed_info': {
                            'label_trainer': [],
                            'trainer': ['node-1'],
                            'assist_trainer': []
                        }
                    }
                }
            }
        # Case 3: no config file exists at all -> empty result.
        def mock_func(*args, **kwargs):
            return False
        os_path = mocker.patch("os.path.exists", side_effect=mock_func)
        trainer_config = FedConfig.load_trainer_config("test")
        assert trainer_config == {}
        # Case 4: three parties; only node-1 and node-2 have config files.
        mocker.patch.object(FedNode, 'trainers', {"node-1": "test", "node-2": "test3", "assist_trainer": "test2"})
        def mock_load_json_config(*args, **kwargs):
            if load_json_config.call_count == 1:
                return [{
                    "identity": "trainer",
                    "model_info": {
                        "name": "vertical_xgboost"
                    }
                }]
            elif load_json_config.call_count == 2:
                return [
                    {"identity": "label_trainer",
                     "model_info": {
                         "name": "vertical_logistic_regression"
                     }
                     }
                ]
            else:
                return [{
                    "identity": "assist_trainer",
                    "model_info": {
                        "name": "vertical_xgboost"
                    }
                }]
        load_json_config = mocker.patch('service.fed_config.load_json_config', side_effect=mock_load_json_config)
        def mock_func(*args, **kwargs):
            if os_path.call_count <= 2:
                return True
            else:
                return False
        os_path = mocker.patch("os.path.exists", side_effect=mock_func)
        trainer_config = FedConfig.load_trainer_config("test")
        assert trainer_config == \
            {
                0:
                {
                    'node-1': {
                        'identity': 'trainer',
                        'model_info': {'name': 'vertical_xgboost'},
                        'fed_info': {'label_trainer': ['node-2'], 'trainer': ['node-1'], 'assist_trainer': []}
                    },
                    'node-2': {
                        'identity': 'label_trainer',
                        'model_info': {'name': 'vertical_logistic_regression'},
                        'fed_info': {'label_trainer': ['node-2'], 'trainer': ['node-1'], 'assist_trainer': []}
                    },
                }
            }
        # Case 5: vertical_kmeans on both configured parties; the assist
        # trainer's file is missing.
        # Only checks the call completes — the expected value is kept
        # commented out, as in the original review.
        mocker.patch.object(FedNode, 'trainers', {"node-1": "test", "node-2": "test3", "assist_trainer": "test2"})
        def mock_load_json_config(*args, **kwargs):
            if load_json_config.call_count == 1:
                return [{
                    "identity": "trainer",
                    "model_info": {
                        "name": "vertical_kmeans"
                    }
                }]
            elif load_json_config.call_count == 2:
                return [{
                    "identity": "label_trainer",
                    "model_info": {
                        "name": "vertical_kmeans"
                    }
                }]
            else:
                return [{}]
        load_json_config = mocker.patch('service.fed_config.load_json_config', side_effect=mock_load_json_config)
        def mock_func(*args, **kwargs):
            if os_path.call_count <= 2:
                return True
            else:
                return False
        os_path = mocker.patch("os.path.exists", side_effect=mock_func)
        trainer_config = FedConfig.load_trainer_config("test")
        # assert trainer_config == \
        #     {
        #         0:
        #         {
        #             'node-1': {
        #                 'identity': 'trainer',
        #                 'model_info': {'name': 'vertical_xgboost'},
        #                 'fed_info': {'label_trainer': ['node-2'], 'trainer': ['node-1'], 'assist_trainer': []}
        #             },
        #             'node-2': {
        #                 'identity': 'label_trainer',
        #                 'model_info': {'name': 'vertical_logistic_regression'},
        #                 'fed_info': {'label_trainer': ['node-2'], 'trainer': ['node-1'], 'assist_trainer': []}
        #             },
        #         }
        #     }
    def test_get_config(self, mocker):
        """get_config should fetch the job config from the scheduler and wire up job state.

        All collaborators (FedNode channel creation, the scheduler gRPC stub,
        json parsing, and the log-handler factories) are mocked. The test then
        verifies that FedConfig stores the log handlers, forwards the job id to
        the handler factories, applies the parsed global_epoch to FedJob, and
        returns the scheduler's response unchanged.
        """
        mocker.patch.object(FedNode, "create_channel", return_value='55001')
        mocker.patch("service.fed_config.scheduler_pb2_grpc.SchedulerStub.__init__", side_effect=lambda x: None)
        mocker.patch("service.fed_config.scheduler_pb2_grpc.SchedulerStub.getConfig", create=True,
                     return_value=scheduler_pb2.GetConfigResponse(jobId=2, config="test_config"))
        mocker.patch.object(FedJob, "global_epoch", 0)
        # json.loads is patched so the stub's config string parses into a
        # minimal model/train config regardless of its actual content.
        mocker.patch("json.loads",
                     return_value={"model_info": {"name": "test"}, "train_info": {"train_params": {"global_epoch": 1}}})
        mocker.patch("service.fed_config.add_job_log_handler", return_value="job_log_handler")
        mocker.patch("service.fed_config.add_job_stage_log_handler", return_value="job_stage_log_handler")
        resp = FedConfig.get_config()
        FedNode.create_channel.assert_called_once_with("scheduler")
        assert FedConfig.job_log_handler == "job_log_handler"
        assert FedConfig.job_stage_log_handler == "job_stage_log_handler"
        service.fed_config.add_job_log_handler.assert_called_once_with(2, '')
        service.fed_config.add_job_stage_log_handler.assert_called_once_with(2, '', 0, "test")
        assert FedJob.global_epoch == 1
        assert resp.config == "test_config"
def test_load_algorithm_list(self, mocker):
def mock_load_json_config(args):
if '/algorithm/config/vertical_xgboost/trainer' in args:
return {"identity": "trainer"}
elif '/algorithm/config/vertical_xgboost/label_trainer' in args:
return {"identity": "label_trainer"}
mocker.patch('service.fed_config.load_json_config',
side_effect=mock_load_json_config)
FedConfig.load_algorithm_list()
assert FedConfig.default_config_map["vertical_xgboost"] == {"trainer": {
"identity": "trainer"}, "label_trainer": {"identity": "label_trainer"}}
| 11,371 | 39.614286 | 120 | py |
XFL | XFL-master/test/service/test_service_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from concurrent import futures
import grpc
import pytest
from common.communication.gRPC.python import (commu_pb2, control_pb2,
status_pb2, trainer_pb2_grpc)
from common.storage.redis.redis_conn import RedisConn
from common.utils.grpc_channel_options import insecure_options
from service.fed_job import FedJob
from service.fed_node import FedNode
from service.trainer import TrainerService
host = 'localhost'
listening_port = 56001
@pytest.fixture(scope='module', autouse=True)
def start_trainer():
    """Run a local gRPC server hosting TrainerService for the whole test module."""
    # Start the trainer-side gRPC service (the original comment said "start
    # scheduler", but a TrainerService is what gets registered here).
    server = grpc.server(futures.ThreadPoolExecutor(
        max_workers=10), options=insecure_options)
    trainer_pb2_grpc.add_TrainerServicer_to_server(
        TrainerService(), server)
    server.add_insecure_port(f"[::]:{listening_port}")
    server.start()
    # Yield control to the tests; stop the server immediately on teardown.
    yield
    server.stop(None)
@pytest.fixture()
def start_client():
    """Return a TrainerStub connected to the locally started trainer server."""
    channel = grpc.insecure_channel(
        f"{host}:{listening_port}", options=insecure_options)
    stub = trainer_pb2_grpc.TrainerStub(channel)
    return stub
def yield_post_request():
    """Yield three PostRequest messages on the same key with 1-, 2- and 3-byte zero payloads."""
    for size in (1, 2, 3):
        yield commu_pb2.PostRequest(key='test~test_channel_1~1', value=bytes(size))
class TestTrainerService():
    """End-to-end tests for TrainerService over a real local gRPC channel."""
    def test_post(self, start_client, mocker):
        """Streamed post chunks must be concatenated into one Redis put."""
        # mock redis service
        mocker.patch.object(RedisConn, 'put')
        response = start_client.post(yield_post_request())
        assert response == commu_pb2.PostResponse(code=0)
        # The three payloads (1 + 2 + 3 zero bytes) are joined under one key.
        request_key = 'test~test_channel_1~1'
        RedisConn.put.assert_called_once_with(request_key, bytes(6))
    @pytest.mark.parametrize('action',
                             [(control_pb2.START),
                              (control_pb2.STOP),
                              ])
    def test_control(self, start_client, action, mocker):
        """control() must map START/STOP actions onto the matching FedJob status."""
        def action2status(action):
            # Local expected-value mapping (shadows the outer `action` on purpose).
            if action == control_pb2.START:
                return status_pb2.START_TRAIN
            if action == control_pb2.STOP:
                return status_pb2.STOP_TRAIN
        mocker.patch.object(FedJob, 'status', spec='value', create=True)
        request = control_pb2.ControlRequest(control=action)
        response = start_client.control(request)
        assert response.code == 0
        assert FedJob.status.value == action2status(action)
        assert response.message == f"{status_pb2.StatusEnum.Name(request.control)} Completed."
    @pytest.mark.parametrize('node_id, code',
                             [('node-1', 1),
                              ('node-2', 2),
                              ('node-3', 3),
                              ])
    def test_status(self, start_client, node_id, code, mocker):
        """status() must report this node's current FedJob status under its node id."""
        mocker.patch.object(FedJob, 'status', spec='value', create=True)
        mocker.patch.object(FedNode, 'node_id', node_id)
        FedJob.status.value = code
        request = status_pb2.StatusRequest()
        response = start_client.status(request)
        assert response.trainerStatus[node_id] == status_pb2.Status(
            code=code, status=status_pb2.StatusEnum.Name(code))
| 3,866 | 35.828571 | 94 | py |
XFL | XFL-master/test/common/test_xoperator.py | import pytest
from common.xoperator import get_operator
from algorithm.framework.vertical.xgboost.trainer import VerticalXgboostTrainer
@pytest.mark.parametrize("name, role", [
    ("vertical_xgboost", "trainer"), ("vertical_xgboost", "client"),
    ("mixed_xgboost", "label_trainer"), ("vertical_abc", "assist_trainer")
])
def test_get_operator(name, role):
    """Known (name, role) pairs resolve to the operator class; anything else raises ValueError."""
    invalid = role == "client" or name in ("mixed_xgboost", "vertical_abc")
    if invalid:
        with pytest.raises(ValueError):
            get_operator(name, role)
    else:
        operator = get_operator(name, role)
        assert operator.__name__ == VerticalXgboostTrainer.__name__
| 614 | 33.166667 | 83 | py |
XFL | XFL-master/test/common/test_xregister.py | import pytest
from common.xregister import xregister, XRegister
from algorithm.framework.vertical.xgboost.trainer import VerticalXgboostTrainer
class Abc:
    """Minimal placeholder class used as a registration target in the tests."""
class TestXRegister():
    """Tests for the global `xregister` registry and the XRegister class.

    NOTE(review): these tests mutate the shared `xregister` singleton and
    rely on executing in declaration order (register -> call -> unregister).
    """
    @pytest.mark.parametrize("target", [
        (Abc), ("abc"), ("Abc"), (Abc), ("CDE")
    ])
    def test_register(self, target):
        """Register by object, by name string, and reject non-callable payloads."""
        if target == "abc":
            # Registering under an explicit name with a callable payload.
            xregister.register(target)(lambda x: x+2)
            assert 'abc' in xregister.__dict__
        elif target == "CDE":
            # A non-callable payload must be rejected.
            with pytest.raises(TypeError):
                xregister.register(target)("CDE")
        else:
            xregister.register(target)
            assert 'Abc' in xregister.__dict__
    @pytest.mark.parametrize("name", ["Abc", "XYZ"])
    def test_call(self, name):
        """Calling the registry with a registered name returns it; unknown names raise KeyError."""
        if name == "Abc":
            assert xregister(name).__name__ == Abc.__name__
        else:
            with pytest.raises(KeyError):
                xregister(name)
    @pytest.mark.parametrize("name", ["Abc", "XYZ"])
    def test_unregister(self, name):
        """Unregistering removes the name; per this test, unknown names do not raise."""
        xregister.unregister(name)
        assert "Abc" not in xregister.__dict__
    def test_registered_object(self):
        """registered_object exposes the registry's backing dict."""
        res = xregister.registered_object
        assert xregister.__dict__ == res
    def test_get_class_name(self):
        """get_class_name reports the class's own name."""
        name = XRegister.get_class_name()
        assert name == "XRegister"
| 1,380 | 29.021739 | 79 | py |
XFL | XFL-master/test/common/crypto/__init__.py | 0 | 0 | 0 | py | |
XFL | XFL-master/test/common/crypto/key_agreement/test_diffie_hellman_C(t_t).py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# import json
# import time
# from concurrent import futures
# import grpc
# from common.utils.grpc_channel_options import options
# from common.communication.gRPC.python import trainer_pb2_grpc
# from service.trainer import TrainerService
# from fed_api import Commu
# from fed_api import FedNode
# from fed_api import DataPool
# from fed_api import DiffieHellman
# FedNode.init_fednode()
# FedNode.config["node_id"] = 'node-1'
# FedNode.node_id = 'node-1'
# server = grpc.server(futures.ThreadPoolExecutor(max_workers=10), options=options)
# trainer_pb2_grpc.add_TrainerServicer_to_server(TrainerService(), server)
# FedNode.add_server(server, "trainer")
# server.start()
# with open("./config/data_pool_config.json") as f:
# data_pool_config = json.load(f)
# DataPool(data_pool_config)
# Commu(FedNode.config)
# time.sleep(5)
# dh = DiffieHellman(fed_ids=['node-1', 'node-2'], key_bitlength=3072, optimized=True, channel_name="diffie_hellman")
# secret = dh.exchange()
# print(secret)
# print(dh) | 1,622 | 30.823529 | 117 | py |
XFL | XFL-master/test/common/crypto/key_agreement/test_diffie_hellman_B(s_t).py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# import json
# import time
# from concurrent import futures
# import grpc
# from common.utils.grpc_channel_options import options
# from common.communication.gRPC.python import trainer_pb2_grpc
# from service.trainer import TrainerService
# from fed_api import Commu
# from fed_api import FedNode
# from fed_api import DataPool
# from fed_api import DiffieHellman
# FedNode.init_fednode()
# FedNode.config["node_id"] = 'node-1'
# FedNode.node_id = 'node-1'
# server = grpc.server(futures.ThreadPoolExecutor(max_workers=10), options=options)
# trainer_pb2_grpc.add_TrainerServicer_to_server(TrainerService(), server)
# FedNode.add_server(server, "trainer")
# server.start()
# with open("./config/data_pool_config.json") as f:
# data_pool_config = json.load(f)
# DataPool(data_pool_config)
# Commu(FedNode.config)
# time.sleep(5)
# dh = DiffieHellman(fed_ids=['scheduler', 'node-1'], key_bitlength=3072, optimized=True, channel_name="diffie_hellman")
# secret = dh.exchange()
# print(secret)
# print(dh) | 1,625 | 30.882353 | 120 | py |
XFL | XFL-master/test/common/crypto/key_agreement/__init__.py | 0 | 0 | 0 | py | |
XFL | XFL-master/test/common/crypto/key_agreement/test_diffie_hellman_A(s_t).py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# import json
# import time
# from concurrent import futures
# import grpc
# from common.utils.grpc_channel_options import options
# from common.communication.gRPC.python import scheduler_pb2_grpc
# from service.scheduler import SchedulerService
# from fed_api import Commu
# from fed_api import FedNode
# from fed_api import DataPool
# from fed_api import DiffieHellman
# FedNode.init_fednode(is_scheduler=True)
# FedNode.config["node_id"] = 'scheduler'
# FedNode.node_id = 'scheduler'
# server = grpc.server(futures.ThreadPoolExecutor(max_workers=10), options=options)
# scheduler_pb2_grpc.add_SchedulerServicer_to_server(SchedulerService(), server)
# FedNode.add_server(server, "scheduler")
# server.start()
# with open("./config/data_pool_config.json") as f:
# data_pool_config = json.load(f)
# DataPool(data_pool_config)
# Commu(FedNode.config)
# time.sleep(5)
# dh = DiffieHellman(fed_ids=['scheduler', 'node-1'], key_bitlength=3072, optimized=True, channel_name="diffie_hellman")
# secret = dh.exchange()
# print(secret)
# print(dh) | 1,662 | 31.607843 | 120 | py |
XFL | XFL-master/test/common/crypto/key_agreement/test_diffie_hellman_D(t_t).py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# import json
# import time
# from concurrent import futures
# import grpc
# from common.utils.grpc_channel_options import options
# from common.communication.gRPC.python import trainer_pb2_grpc
# from service.trainer import TrainerService
# from fed_api import Commu
# from fed_api import FedNode
# from fed_api import DataPool
# from fed_api import DiffieHellman
# FedNode.init_fednode()
# FedNode.config["node_id"] = 'node-2'
# FedNode.node_id = 'node-2'
# server = grpc.server(futures.ThreadPoolExecutor(max_workers=10), options=options)
# trainer_pb2_grpc.add_TrainerServicer_to_server(TrainerService(), server)
# FedNode.add_server(server, "trainer")
# server.start()
# with open("./config/data_pool_config.json") as f:
# data_pool_config = json.load(f)
# DataPool(data_pool_config)
# Commu(FedNode.config)
# time.sleep(5)
# dh = DiffieHellman(fed_ids=['node-1', 'node-2'], key_bitlength=3072, optimized=True, channel_name="diffie_hellman")
# secret = dh.exchange()
# print(secret)
# print(dh) | 1,622 | 30.823529 | 117 | py |
XFL | XFL-master/test/common/crypto/paillier/test_paillier.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import numpy as np
import pickle
import pytest
from common.crypto.paillier.context import PaillierContext
from common.crypto.paillier.paillier import Paillier, PaillierCiphertext, RawCiphertext
# Parametrization tuples for TestPaillier.test_unary / test_binary.
# Columns (per the parametrize argnames): (djn_on, precision, is_batch,
# num_cores). num_cores == -1 presumably means "use all cores" — TODO confirm
# against the Paillier API.
data = [
    (True, None, True, -1),
    (True, 7, True, -1),
    (True, 7, False, 1),
    (False, None, True, -1),
    (False, 7, True, -1),
    (False, 7, False, 1)
]
# Parametrization for TestPaillier.test_ciphertext: whether the plaintext is
# a batch (ndarray) or a single scalar.
data2 = [
    (True,),
    (False,)
]
class TestPaillier():
    """End-to-end tests for the Paillier homomorphic-encryption wrapper.

    Covers context (de)serialization, encrypt/decrypt round trips with both
    private and public contexts, homomorphic add/sub/scalar-mul/div,
    ciphertext serialization, and the error paths (wrong context type,
    mismatched contexts, invalid operands).
    """
    def setup_class(self):
        # One 2048-bit key pair shared by all tests; epsilon bounds the
        # fixed-point rounding error tolerated when comparing plaintexts.
        self.context = PaillierContext.generate(2048)
        self.epsilon = 1e-5
    def teardown_class(self):
        pass
    def test_context(self):
        """A context serialized without its private key must not compare equal; with it, it must."""
        a = self.context.serialize(save_private_key=False)
        b = PaillierContext.deserialize_from(a)
        with pytest.raises(AssertionError):
            assert self.context == b
        a = self.context.serialize(save_private_key=True)
        b = PaillierContext.deserialize_from(a)
        assert self.context == b
        # NOTE(review): only the first statement inside this block can raise;
        # the second init call is unreachable once the first raises ValueError.
        with pytest.raises(ValueError):
            PaillierContext().init()
            PaillierContext().init(3, 5, 10)
        p, q = PaillierContext._generate_paillier_private_key(2048)
    @pytest.mark.parametrize("djn_on, precision, is_batch, num_cores", data)
    def test_unary(self, djn_on, precision, is_batch, num_cores):
        """Encrypt/decrypt round trip; decrypting with a public-only context must fail."""
        s = (50,)
        if is_batch:
            p1 = np.random.random(s).astype(np.float32) * 100 - 50
        else:
            p1 = random.random() * 100 - 50
        # encrypt with the full (private) context
        c1 = Paillier.encrypt(self.context, p1, precision=precision,
                              max_exponent=None, obfuscation=True, num_cores=num_cores)
        # encrypt with only the public part of the context
        pub_context = self.context.to_public()
        c11 = Paillier.encrypt(pub_context, p1, precision=precision,
                               max_exponent=None, obfuscation=True, num_cores=num_cores)
        # decrypt
        a = Paillier.decrypt(self.context, c1, num_cores=num_cores)
        assert (np.all(a - p1 < self.epsilon))
        b = Paillier.decrypt(self.context, c11, num_cores=num_cores)
        assert (np.all(b - p1 < self.epsilon))
        with pytest.raises(TypeError):
            Paillier.decrypt(pub_context, c1, num_cores=num_cores)
    @pytest.mark.parametrize("djn_on, precision, is_batch, num_cores", data)
    def test_binary(self, djn_on, precision, is_batch, num_cores):
        """Homomorphic sum/add/sub and ciphertext-scalar mul/div round trips."""
        s = (50,)
        if is_batch:
            p1 = np.random.random(s).astype(np.float32) * 100 - 50
            p2 = np.random.random(s).astype(np.float32) * 100 - 20
        else:
            p1 = random.random() * 100 - 50
            p2 = random.random() * 100 - 20
        # encrypt
        c1 = Paillier.encrypt(self.context, p1, precision=precision,
                              max_exponent=None, obfuscation=True, num_cores=num_cores)
        c2 = Paillier.encrypt(self.context, p2, precision=precision,
                              max_exponent=None, obfuscation=True, num_cores=num_cores)
        # sum
        if is_batch:
            c3 = sum(c1)
            a = Paillier.decrypt(self.context, c3, num_cores=num_cores)
            assert (a - sum(p1) < self.epsilon)
        # add
        c3 = c1 + c2
        p3 = Paillier.decrypt(self.context, c3, num_cores=num_cores)
        assert (np.all(p3 - (p1 + p2) < self.epsilon))
        # sub
        c3 = c1 - c2
        p3 = Paillier.decrypt(self.context, c3, num_cores=num_cores)
        assert(np.all(p3 - (p1 - p2) < self.epsilon))
        # subtraction must also work on a ciphertext rebuilt from its
        # serialized form under a public-only context
        c21 = Paillier.ciphertext_from(
            self.context.to_public(), Paillier.serialize(c2))
        c3 = c1 - c21
        p3 = Paillier.decrypt(self.context, c3, num_cores=num_cores)
        assert(np.all(p3 - (p1 - p2) < self.epsilon))
        # add scalar
        c3 = c1 + p2
        p3 = Paillier.decrypt(self.context, c3, num_cores=num_cores)
        assert np.all(p3 - (p1 + p2) < self.epsilon)
        c3 = p2 + c1
        p3 = Paillier.decrypt(self.context, c3, num_cores=num_cores)
        assert np.all(p3 - (p1 + p2) < self.epsilon)
        # sub scalar
        c3 = c1 - p2
        p3 = Paillier.decrypt(self.context, c3, num_cores=num_cores)
        assert np.all(p3 - (p1 - p2) < self.epsilon)
        c3 = p2 - c1
        p3 = Paillier.decrypt(self.context, c3, num_cores=num_cores)
        assert np.all(p3 - (p2 - p1) < self.epsilon)
        c3 = c1 - p2
        p3 = Paillier.decrypt(self.context, c3, num_cores=num_cores)
        assert np.all(p3 - (p1 - p2) < self.epsilon)
        c3 = p2 - c1
        p3 = Paillier.decrypt(self.context, c3, num_cores=num_cores)
        assert np.all(p3 - (p2 - p1) < self.epsilon)
        # multiply
        c3 = c1 * p2
        p3 = Paillier.decrypt(self.context, c3, num_cores=num_cores)
        assert np.all(p3 - p1 * p2 < self.epsilon)
        c3 = p2 * c1
        p3 = Paillier.decrypt(self.context, c3, num_cores=num_cores)
        assert np.all(p3 - p1 * p2 < self.epsilon)
        c3 = c1 * p2
        p3 = Paillier.decrypt(self.context, c3, num_cores=num_cores)
        assert np.all(p3 - p1 * p2 < self.epsilon)
        c3 = p2 * c1
        p3 = Paillier.decrypt(self.context, c3, num_cores=num_cores)
        assert np.all(p3 - p1 * p2 < self.epsilon)
        # divide
        c3 = c1 / p2
        p3 = Paillier.decrypt(self.context, c3, num_cores=num_cores)
        assert np.all(p3 - p1 / p2 < self.epsilon)
        c3 = c1 / p2
        p3 = Paillier.decrypt(self.context, c3, num_cores=num_cores)
        assert np.all(p3 - p1 / p2 < self.epsilon)
    @pytest.mark.parametrize("p1_is_batch", data2)
    def test_ciphertext(self, p1_is_batch):
        """Serialize under a public context, rebuild under the private one, decrypt."""
        s = (3, 40)
        context = Paillier.context(2048)
        if p1_is_batch:
            p1 = np.random.random(s).astype(np.float32) * 100 - 50
        else:
            p1 = random.random() * 100 - 50
        context2 = Paillier.context_from(context.to_public().serialize())
        c1 = Paillier.encrypt(context2, p1, precision=None,
                              max_exponent=None, obfuscation=True)
        s1 = Paillier.serialize(c1)
        c2 = Paillier.ciphertext_from(context, s1)
        p2 = Paillier.decrypt(context, c2)
        assert np.all(p1 - p2 < 1e-10)
    @pytest.mark.parametrize("compression", [True, False])
    def test_rest(self, compression):
        """Remaining API surface: (de)serialization, type/context errors, djn mode, int dtypes."""
        context = Paillier.context(2048)
        p1 = random.random() * 100 - 50
        c1 = Paillier.encrypt(context, p1, precision=7,
                              max_exponent=None, obfuscation=True)
        serialized_c1 = c1.serialize(compression)
        c1_2 = PaillierCiphertext.deserialize_from(context, serialized_c1, compression)
        assert c1.raw_ciphertext == c1_2.raw_ciphertext
        assert c1.exponent == c1_2.exponent
        # adding a non-numeric operand must fail
        with pytest.raises(TypeError):
            c1 + "342"
        # ciphertexts from different contexts must not combine
        context2 = Paillier.context(2048)
        p2 = random.random() * 100 - 50
        c2 = Paillier.encrypt(context2, p2, precision=7,
                              max_exponent=None, obfuscation=True)
        with pytest.raises(ValueError):
            c1 + c2
        # ciphertext * ciphertext is not supported by Paillier
        with pytest.raises(TypeError):
            c1 * c1
        pub_context = context.to_public()
        c1 = Paillier.encrypt(pub_context, p1, precision=7,
                              max_exponent=None, obfuscation=True)
        p1_1 = Paillier.decrypt(context, c1)
        assert abs(p1 - p1_1) < 1e-5
        # re-randomizing a ciphertext must not change its plaintext
        c1 = Paillier.obfuscate(c1)
        p1_1 = Paillier.decrypt(context, c1)
        assert abs(p1 - p1_1) < 1e-5
        # repeat the round trips under a djn_on context
        context = Paillier.context(2048, djn_on=True)
        c1 = Paillier.encrypt(context, p1, precision=7,
                              max_exponent=None, obfuscation=True)
        p1_1 = Paillier.decrypt(context, c1)
        assert abs(p1 - p1_1) < 1e-5
        pub_context = context.to_public()
        c1 = Paillier.encrypt(pub_context, p1, precision=7,
                              max_exponent=None, obfuscation=True)
        p1_1 = Paillier.decrypt(context, c1)
        assert abs(p1 - p1_1) < 1e-5
        c1 = Paillier.encrypt(pub_context, p1, precision=7,
                              max_exponent=20, obfuscation=False)
        p1_1 = Paillier.decrypt(context, c1, num_cores=1)
        assert abs(p1 - p1_1) < 1e-5
        c1 = Paillier.encrypt(pub_context, p1, precision=7,
                              max_exponent=20, obfuscation=True, num_cores=1)
        p1_1 = Paillier.decrypt(context, c1)
        assert abs(p1 - p1_1) < 1e-5
        # strings are not encryptable
        with pytest.raises(TypeError):
            Paillier.encrypt(pub_context, "123", precision=7,
                             max_exponent=None, obfuscation=True)
        # integer plaintexts, scalar and ndarray, with int decrypt dtypes
        p3 = 3
        c3 = Paillier.encrypt(pub_context, p3, precision=7,
                              max_exponent=None, obfuscation=True)
        p3_1 = Paillier.decrypt(context, c3)
        assert p3_1 == p3
        c3 = Paillier.obfuscate(c3)
        p3_1 = Paillier.decrypt(context, c3, dtype='int')
        assert np.all(p3_1 == p3)
        p3_1 = Paillier.decrypt(context, c3, dtype='i64')
        assert np.all(p3_1 == p3)
        p4 = np.array([2, 3], dtype=np.int32)
        c4 = Paillier.encrypt(pub_context, p4, precision=7,
                              max_exponent=None, obfuscation=True)
        p4_1 = Paillier.decrypt(context, c4, dtype='int')
        assert np.all(p4_1 == p4)
        p4_1 = Paillier.decrypt(context, c4, dtype='i64')
        assert np.all(p4_1 == p4)
        c4 = Paillier.encrypt(pub_context, p4, precision=7,
                              max_exponent=None, obfuscation=True, num_cores=1)
        p4_1 = Paillier.decrypt(context, c4)
        assert np.all(p4_1 == p4)
        p4_1 = Paillier.decrypt(context, c4, num_cores=1)
        assert np.all(p4_1 == p4)
        c4 = Paillier.obfuscate(c4)
        p4_1 = Paillier.decrypt(context, c4)
        assert np.all(p4_1 == p4)
        # non-ciphertext inputs pass through _decrypt_single but are rejected
        # by the public decrypt/obfuscate entry points
        assert 123 == Paillier._decrypt_single(123, context)
        with pytest.raises(TypeError):
            Paillier.decrypt(context, 123)
        with pytest.raises(TypeError):
            Paillier.obfuscate(123)
| 10,813 | 35.167224 | 88 | py |
XFL | XFL-master/test/common/crypto/one_time_pad/test_one_time_add.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pickle
from functools import reduce
from random import randint
from secrets import token_hex
import numpy as np
import pytest
import torch
from common.crypto.one_time_pad.one_time_add import (OneTimeAdd,
OneTimePadCiphertext,
OneTimePadContext,
OneTimeKey)
def almost_equal(a, b):
    """Return True if `a` and `b` are element-wise equal within 1e-4.

    Supports numpy arrays, torch tensors, and plain scalars (dispatching on
    the type of `a`).

    Bug fix: the original compared `a - b < 1e-4` without taking the absolute
    value, so any case where a < b passed regardless of magnitude (e.g.
    almost_equal(1.0, 100.0) was True), silently weakening every assertion
    built on this helper.
    """
    if isinstance(a, np.ndarray):
        return np.all(np.abs(a - b) < 1e-4)
    elif isinstance(a, torch.Tensor):
        return torch.all(torch.abs(a - b) < 1e-4)
    else:
        return abs(a - b) < 1e-4
# def correctness_scalar(modulus_exp, data_type, num_keys):
# # random keys
# key1 = [np.array(int(token_hex(modulus_exp//8), 16))]
# for i in range(num_keys - 1):
# key = int(token_hex(modulus_exp//8), 16)
# key = np.array(key)
# key1.append(key)
# is_addition = randint(0, 1)
# # random input
# if "numpy" in data_type:
# data = np.random.random(())
# elif "torch" in data_type:
# data = torch.rand(())
# # context
# context_ = OneTimePadContext(modulus_exp, data_type)
# # encrypt
# ciphertext = OneTimeAdd.encrypt(context_, data, key1, is_addition, serialized=False)
# ciphertext2 = OneTimeAdd.encrypt(context_, data, key1, is_addition, serialized=True)
# assert pickle.dumps(ciphertext.data) == ciphertext2
# # decrypt
# plaintext = OneTimeAdd.decrypt(context_, ciphertext, key1, is_addition)
# assert almost_equal(data, plaintext)
def correctness(data_shape, modulus_exp, data_type, num_keys):
    """Round-trip and homomorphic-addition checks for the one-time-pad scheme.

    Args:
        data_shape: shape of the plaintext tensor; () means a scalar.
        modulus_exp: modulus bit length; key material is modulus_exp bits
            (modulus_exp // 8 random bytes per element).
        data_type: backend selector, a string containing "numpy" or "torch".
        num_keys: number of key components stacked into one OneTimeKey.
    """
    if data_shape == ():
        flatten_shape = 0
    else:
        flatten_shape = reduce(lambda x, y: x*y, data_shape)
    # random keys (scalar branch vs. per-element key arrays reshaped to data_shape)
    if flatten_shape == 0:
        key1 = [np.array(int(token_hex(modulus_exp//8), 16))]
        for i in range(num_keys - 1):
            key = int(token_hex(modulus_exp//8), 16)
            key = np.array(key)
            key1.append(key)
    else:
        key1 = [int(token_hex(modulus_exp//8), 16) for i in range(flatten_shape)]
        key1 = [np.array(key1).reshape(*data_shape)]
        for i in range(num_keys - 1):
            key = [int(token_hex(modulus_exp//8), 16) for i in range(flatten_shape)]
            key = np.array(key).reshape(*data_shape)
            key1.append(key)
    key1 = OneTimeKey(key1, modulus_exp)
    # one random add/subtract direction flag per key component
    is_addition = [randint(0, 1) for i in range(len(key1))]
    # random input
    if "numpy" in data_type:
        data = np.random.random(data_shape)
    elif "torch" in data_type:
        data = torch.rand(data_shape)
    # context
    context_ = OneTimePadContext(modulus_exp, data_type)
    # encrypt: serialized=True must equal the pickled payload of serialized=False
    ciphertext = OneTimeAdd.encrypt(context_, data, key1, is_addition, serialized=False)
    ciphertext2 = OneTimeAdd.encrypt(context_, data, key1, is_addition, serialized=True)
    assert pickle.dumps(ciphertext.data) == ciphertext2
    # decrypt
    plaintext = OneTimeAdd.decrypt(context_, ciphertext, key1, is_addition)
    assert almost_equal(data, plaintext)
    # addition and subtraction of ciphertexts
    # random input
    if "numpy" in data_type:
        data3 = np.random.random(data_shape)
    elif "torch" in data_type:
        data3 = torch.rand(data_shape)
    # key3 is the element-wise negation of key1, so Enc(data, key1) +
    # Enc(data3, -key1) decodes directly to data + data3
    key3 = list(map(lambda x: np.array(-x), key1.value))
    key3 = OneTimeKey(key3, modulus_exp)
    ciphertext3 = OneTimeAdd.encrypt(context_, data3, key3, is_addition, serialized=False)
    ciphertext4 = OneTimeAdd.encrypt(context_, data3, key1, is_addition, serialized=False)
    c = ciphertext + ciphertext3
    plaintext = c.decode()
    assert almost_equal(data + data3, plaintext)
    c = ciphertext - ciphertext4
    plaintext = c.decode()
    assert almost_equal(data - data3, plaintext)
    # an independent second key, same construction as key1
    if flatten_shape == 0:
        key2 = [np.array(int(token_hex(modulus_exp//8), 16))]
        for i in range(num_keys - 1):
            key = int(token_hex(modulus_exp//8), 16)
            key = np.array(key)
            key2.append(key)
    else:
        key2 = [int(token_hex(modulus_exp//8), 16) for i in range(flatten_shape)]
        key2 = [np.array(key2).reshape(*data_shape)]
        for i in range(num_keys - 1):
            key = [int(token_hex(modulus_exp//8), 16) for i in range(flatten_shape)]
            key = np.array(key).reshape(*data_shape)
            key2.append(key)
    key2 = OneTimeKey(key2, modulus_exp)
    # sum of ciphertexts under key1 and key2 decrypts under key1 + key2
    ciphertext1 = OneTimeAdd.encrypt(context_, data, key1, is_addition, serialized=False)
    ciphertext2 = OneTimeAdd.encrypt(context_, data3, key2, is_addition, serialized=False)
    c = ciphertext1 + ciphertext2
    key3 = [key1.value[i] + key2.value[i] for i in range(len(key1))]
    key3 = OneTimeKey(key3, modulus_exp)
    p = OneTimeAdd.decrypt(context_, c, key3, is_addition)
    assert almost_equal(data + data3, p)
    # encrypting data3 under -key2 shifts the combined key to key1 - key2
    ciphertext1 = OneTimeAdd.encrypt(context_, data, key1, is_addition, serialized=False)
    ciphertext2 = OneTimeAdd.encrypt(context_, data3, OneTimeKey([-i for i in key2.value], modulus_exp), is_addition, serialized=False)
    c = ciphertext1 + ciphertext2
    key3 = [key1.value[i] - key2.value[i] for i in range(len(key1))]
    key3 = OneTimeKey(key3, modulus_exp)
    p = OneTimeAdd.decrypt(context_, c, key3, is_addition)
    assert almost_equal(data + data3, p)
def test_correctness():
    """Exercise `correctness` over every combination of shape, modulus width, backend, and key count."""
    shapes = [(), (11,), (3, 5), (7, 10, 24)]
    modulus_exps = [64, 128]
    backends = ["numpy.ndarray", "numpy", "torch.Tensor", "torch"]
    key_counts = [1]  # historically also [1, 3, 5]
    for shape in shapes:
        for exp in modulus_exps:
            for backend in backends:
                for n_keys in key_counts:
                    correctness(shape, exp, backend, n_keys)
def test_exception():
    """Error paths: bad context arguments, type mismatches, and shape/length mismatches."""
    # NOTE(review): the trailing comma makes modulus_exp the tuple (128,);
    # the context constructor still rejects it — confirm this is intended.
    modulus_exp = 128,
    data_type = "pandas"
    # an unsupported data_type / modulus_exp must be rejected
    with pytest.raises(ValueError):
        OneTimePadContext(modulus_exp, data_type)
    with pytest.raises(ValueError):
        OneTimePadContext(modulus_exp, data_type)
    # ------------------------------------------------------------------------------
    # ciphertext construction requires array data and a real context object
    modulus_exp = 128
    data_type = "numpy.ndarray"
    context_ = OneTimePadContext(modulus_exp, data_type)
    data = 'fdfdsfd'
    with pytest.raises(TypeError):
        OneTimePadCiphertext(data, context_)
    context_ = 54645654634
    data = np.array([2, 4])
    with pytest.raises(TypeError):
        OneTimePadCiphertext(data, context_)
    # ------------------------------------------------------------------------------
    # encrypting with a key whose shape differs from the data must fail
    key_shape = (3, 4)
    flatten_shape = 12
    key = [int(token_hex(modulus_exp//8), 16) for i in range(flatten_shape)]
    key = np.array(key).reshape(*key_shape)
    key = OneTimeKey(key, modulus_exp)
    is_addition = [randint(0, 1) for i in range(len(key))]
    data_type = "torch.Tensor"
    context_ = OneTimePadContext(modulus_exp, data_type)
    data_shape = (4, 5)
    data = torch.rand(data_shape)
    with pytest.raises(ValueError):
        OneTimeAdd.encrypt(context_, data, key, is_addition, serialized=True)
    key_shape = (4, 5)
    flatten_shape = 20
    key = [int(token_hex(modulus_exp//8), 16) for i in range(flatten_shape)]
    key = np.array(key).reshape(*key_shape)
    key = OneTimeKey(key, modulus_exp)
    # ------------------------------------------------------------------------------
    # decrypting with a key of the wrong shape (or wrong component count) must fail
    modulus_exp = 128
    data_type = 'numpy'
    key_shape = (3, 4)
    flatten_shape = 12
    key = [int(token_hex(modulus_exp//8), 16) for i in range(flatten_shape)]
    key = np.array(key).reshape(*key_shape)
    key = OneTimeKey(key, modulus_exp)
    key_shape = (4, 5)
    flatten_shape = 20
    key1 = [int(token_hex(modulus_exp//8), 16) for i in range(flatten_shape)]
    key1 = np.array(key1).reshape(*key_shape)
    key1 = OneTimeKey(key1, modulus_exp)
    is_addition = randint(0, 1)
    data_shape = (4, 5)
    data = np.random.random(data_shape)
    context_ = OneTimePadContext(modulus_exp, data_type)
    ciphertext = OneTimeAdd.encrypt(context_, data, key1, is_addition, serialized=False)
    with pytest.raises(ValueError):
        OneTimeAdd.decrypt(context_, ciphertext, key, is_addition)
    key = [key.value, key.value]
    key = OneTimeKey(key, modulus_exp)
    ciphertext = OneTimeAdd.encrypt(context_, data, key1, is_addition, serialized=False)
    with pytest.raises(ValueError):
        OneTimeAdd.decrypt(context_, ciphertext, key, is_addition)
    # ------------------------------------------------------------------------------
    # ciphertexts from contexts with different modulus widths must not combine
    modulus_exp = 64
    context2 = OneTimePadContext(modulus_exp, data_type)
    key2 = OneTimeKey(key1.value, modulus_exp)
    ciphertext2 = OneTimeAdd.encrypt(context2, data, key2, is_addition, serialized=False)
    with pytest.raises(ValueError):
        ciphertext + ciphertext2
    # ------------------------------------------------------------------------------
    # an is_addition flag list longer than the key must be rejected
    is_addition = [randint(0, 1) for i in range(len(key) + 1)]
    with pytest.raises(ValueError):
        OneTimeAdd.encrypt(context_, data, key1, is_addition, serialized=False)
| 10,297 | 34.388316 | 135 | py |
XFL | XFL-master/test/common/crypto/csprng/test_hmac_drbg_cross_validation.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from common.crypto.csprng.drbg import get_drbg_inst
NAME = 'hmac_drbg'
METHOD = 'sha512'
def hex2bytes(a):
    """Convert a hex string (with optional space separators) to bytes."""
    compact = "".join(a.split(" "))
    return bytes.fromhex(compact)
def test_hmac_drbg_cross_validation():
    """Check that gen() and generator() yield identical byte streams.

    Four DRBG instances are seeded with identical entropy/nonce/additional
    data; two are consumed through gen() and two through generator(), both
    with a list of request sizes and with one aggregate request, and every
    pairing must agree byte-for-byte.
    """
    # Fixed seed material so all four instances start from the same state.
    entropy = '000102 03040506'\
              '0708090A 0B0C0D0E 0F101112 13141516 1718191A 1B1C1D1E'\
              '1F202122 23242526 2728292A 2B2C2D2E 2F303132 33343536'
    nonce = '20212223 24252627'
    additional_data = '12345678'
    # ------------------------------------------------------------------------------
    # drbg_g1/drbg_g2 are consumed via generator(); drbg1/drbg2 via gen().
    drbg_g1 = get_drbg_inst(name=NAME,
                            entropy=hex2bytes(entropy),
                            method=METHOD,
                            nonce=hex2bytes(nonce),
                            additional_data=hex2bytes(additional_data))
    drbg_g2 = get_drbg_inst(name=NAME,
                            entropy=hex2bytes(entropy),
                            method=METHOD,
                            nonce=hex2bytes(nonce),
                            additional_data=hex2bytes(additional_data))
    drbg1 = get_drbg_inst(name=NAME,
                          entropy=hex2bytes(entropy),
                          method=METHOD,
                          nonce=hex2bytes(nonce),
                          additional_data=hex2bytes(additional_data))
    drbg2 = get_drbg_inst(name=NAME,
                          entropy=hex2bytes(entropy),
                          method=METHOD,
                          nonce=hex2bytes(nonce),
                          additional_data=hex2bytes(additional_data))
    # Mixed workload: 100 small requests followed by 1000 larger ones.
    num_bytes = [100] * 100 + [1000] * 1000
    # start = time.time()
    # List of sizes -> list of chunks.
    out1 = drbg1.gen(num_bytes)
    # print(time.time() - start)
    # start = time.time()
    # Single aggregate size -> one contiguous chunk.
    out2 = drbg2.gen(sum(num_bytes))
    # print(time.time() - start)
    # start = time.time()
    g1 = drbg_g1.generator(num_bytes)
    out_g1 = []
    for o in g1:
        out_g1.append(o)
    # print(time.time() - start)
    num_bytes1 = sum(num_bytes)
    # start = time.time()
    g2 = drbg_g2.generator(num_bytes1)
    out_g2 = next(g2)
    # print(time.time() - start)
    # Chunked outputs of gen() and generator() must match element-wise.
    assert out1[0] == out_g1[0]
    assert out1[-1] == out_g1[-1]
    assert np.all([a == b for a, b in zip(out1, out_g1)])
    # Aggregate outputs must match, and equal the concatenated chunk stream.
    assert out2 == out_g2
    assert out2 == b''.join(out_g1)
if __name__ == "__main__":
test_hmac_drbg_cross_validation() | 3,054 | 30.822917 | 84 | py |
XFL | XFL-master/test/common/crypto/csprng/hmac_drbg_benchmark.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# import time
# from typing import Tuple
# import numpy as np
# import random
# from gmpy2 import mpz
# from fed_api import get_drbg_inst
# def hex2bytes(a):
# return bytes.fromhex(a.replace(' ', ''))
# def split_bytes(x: bytes, out_shape: Tuple[int]):
# if len(out_shape) == 0:
# # return mpz(int(x, 16))
# return mpz(int.from_bytes(x, 'big'))
# elif len(out_shape) == 1:
# a = len(x) // out_shape[0]
# # return [mpz(int(x[a*i: a*(i+1)], 16)) for i in range(out_shape[0])]
# return [mpz(int.from_bytes(x[a*i: a*(i+1)], 'big')) for i in range(out_shape[0])]
# else:
# a = len(x) // out_shape[0]
# return [split_bytes(x[a*i: a*(i+1)], out_shape[1:]) for i in range(out_shape[0])]
# def benchmark():
# # entropy = '000102 03040506'\
# # '0708090A 0B0C0D0E 0F101112 13141516 1718191A 1B1C1D1E'\
# # '1F202122 23242526 2728292A 2B2C2D2E 2F303132 33343536'
# # nonce = '20212223 24252627'
# # additional_data = ''
# # # ------------------------------------------------------------------------------
# # drbg = get_drbg_inst(name='hmac_drbg',
# # entropy=hex2bytes(entropy),
# # method='sha512',
# # nonce=hex2bytes(nonce),
# # additional_data=hex2bytes(additional_data))
# # # first call to generate
# # start = time.time()
# # for i in range(1):
# # out = drbg.gen(num_bytes=16653*256*128//8)
# # end = time.time()
# # print(end - start)
# # start = time.time()
# # out1 = split_bytes(out, [16653, 256])
# # end = time.time()
# # print(end - start)
# # start = time.time()
# # b = np.frombuffer(bytes(out), np.uint8).reshape(16653, 256, 128//8)
# # end = time.time()
# # print(end - start)
# # start = time.time()
# # b = np.frombuffer(bytes(out), np.int64).reshape(16653, 256*2)
# # end = time.time()
# # print(end - start)
# # start = time.time()
# # bytes(out)
# # print(time.time() - start)
# # start = time.time()
# # np.array(out1)
# # end = time.time()
# # print(end - start)
# # print((end - start) / 1)
# # start = time.time()
# # out = drbg.generator(num_bytes=[16653*256*128//8]*1)
# # for i in range(1):
# # next(out)
# # end = time.time()
# # print((end - start) / 1)
# print("#########")
# a = [random.randint(0, 2**128 - 1) for i in range(16653*256)]
# b = [mpz(i) for i in a]
# start = time.time()
# a = np.array(a)
# print(time.time() - start)
# start = time.time()
# b = np.array(b)
# print(time.time() - start)
# start = time.time()
# # (a + a)
# np.mod(a+a, 2**128)
# print(time.time() - start)
# start = time.time()
# # b + b
# np.mod(b+b, 2**128)
# print(time.time() - start)
# if __name__ == "__main__":
# benchmark() | 3,706 | 27.960938 | 91 | py |
XFL | XFL-master/test/common/crypto/csprng/test_hmac_drbg_generator.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from common.crypto.csprng.drbg import get_drbg_inst
"""
HMAC_DRBG
Tese cases provide by:
https://csrc.nist.gov/CSRC/media/Projects/Cryptographic-Standards-and-Guidelines/documents/examples/HMAC_DRBG.pdf
Test method=sha256
Ignore sha1, sha224, sha384, sha512
"""
def hex2bytes(a):
    """Decode a hex string into bytes, ignoring space separators."""
    return bytes.fromhex(a.replace(' ', ''))
def test_hmac_drbg_correctness():
    """Validate the HMAC_DRBG generator() interface against NIST SHA-256 vectors.

    Exercises the eight scenarios from the NIST HMAC_DRBG example document:
    every combination of {no personalization string, personalization string}
    x {no per-call additional input, additional input} x {no reseed, reseed
    between generate calls}.

    Fix: the last expected-value literal previously ended with a stray
    line-continuation backslash that fused the assignment with the following
    ``assert`` and made the whole module a SyntaxError.
    """
    entropy = '000102 03040506'\
              '0708090A 0B0C0D0E 0F101112 13141516 1718191A 1B1C1D1E'\
              '1F202122 23242526 2728292A 2B2C2D2E 2F303132 33343536'
    nonce = '20212223 24252627'
    additional_data = ''
    # --- no personalization string, no additional input -----------------------
    drbg = get_drbg_inst(name='hmac_drbg',
                         entropy=hex2bytes(entropy),
                         method='sha256',
                         nonce=hex2bytes(nonce),
                         additional_data=hex2bytes(additional_data))
    # first call to generate
    out = drbg.generator(num_bytes=512//8)
    rand_val1 = 'D67B8C17 34F46FA3 F763CF57 C6F9F4F2'\
                'DC1089BD 8BC1F6F0 23950BFC 56176352 08C85012 38AD7A44'\
                '00DEFEE4 6C640B61 AF77C2D1 A3BFAA90 EDE5D207 406E5403'
    assert next(out).hex() == rand_val1.replace(' ', '').lower()
    # second call to generate
    out = drbg.generator(num_bytes=512//8)
    rand_val2 = '8FDAEC20 F8B42140 7059E358 8920DA7E'\
                'DA9DCE3C F8274DFA 1C59C108 C1D0AA9B 0FA38DA5 C792037C'\
                '4D33CD07 0CA7CD0C 5608DBA8 B8856546 39DE2187 B74CB263'
    assert next(out).hex() == rand_val2.replace(' ', '').lower()
    additional_data1 = '606162 63646566'\
                       '6768696A 6B6C6D6E 6F707172 73747576 7778797A 7B7C7D7E'\
                       '7F808182 83848586 8788898A 8B8C8D8E 8F909192 93949596'
    additional_data2 = 'A0A1A2 A3A4A5A6'\
                       'A7A8A9AA ABACADAE AFB0B1B2 B3B4B5B6 B7B8B9BA BBBCBDBE'\
                       'BFC0C1C2 C3C4C5C6 C7C8C9CA CBCCCDCE CFD0D1D2 D3D4D5D6'
    # --- no personalization string, with additional input ----------------------
    drbg = get_drbg_inst(name='hmac_drbg',
                         entropy=hex2bytes(entropy),
                         method='sha256',
                         nonce=hex2bytes(nonce),
                         additional_data=hex2bytes(additional_data))
    # first call to generate
    out = drbg.generator(num_bytes=512//8, additional_data=hex2bytes(additional_data1))
    rand_val1 = '41878735 8135419B 93813353 5306176A'\
                'FB251CDD 2BA37988 59B566A0 5CFB1D68 0EA92585 6D5B84D5'\
                '6ADAE870 45A6BA28 D2C908AB 75B7CC41 431FAC59 F38918A3'
    assert next(out).hex() == rand_val1.replace(' ', '').lower()
    # second call to generate
    out = drbg.generator(num_bytes=512//8, additional_data=hex2bytes(additional_data2))
    rand_val2 = '7C067BDD CA817248 23D64C69 829285BD'\
                'BFF53771 6102C188 2E202250 E0FA5EF3 A384CD34 A20FFD1F'\
                'BC91E0C5 32A8A421 BC4AFE3C D47F2232 3EB4BAE1 A0078981'
    assert next(out).hex() == rand_val2.replace(' ', '').lower()
    personalization_str = '404142 43444546'\
                          '4748494A 4B4C4D4E 4F505152 53545556 5758595A 5B5C5D5E'\
                          '5F606162 63646566 6768696A 6B6C6D6E 6F707172 73747576'
    # --- personalization string, no additional input ----------------------------
    drbg = get_drbg_inst(name='hmac_drbg',
                         entropy=hex2bytes(entropy),
                         method='sha256',
                         nonce=hex2bytes(nonce),
                         additional_data=hex2bytes(personalization_str))
    # first call to generate
    out = drbg.generator(num_bytes=512//8)
    rand_val1 = '0DD9C855 89F357C3 89D6AF8D E9D734A9'\
                '17C771EF 2D8816B9 82596ED1 2DB45D73 4A626808 35C02FDA'\
                '66B08E1A 369AE218 F26D5210 AD564248 872D7A28 784159C3'
    assert next(out).hex() == rand_val1.replace(' ', '').lower()
    # second call to generate
    out = drbg.generator(num_bytes=512//8)
    rand_val2 = '46B4F475 6AE715E0 E51681AB 2932DE15'\
                '23BE5D13 BAF0F458 8B11FE37 2FDA37AB E3683173 41BC8BA9'\
                '1FC5D85B 7FB8CA8F BC309A75 8FD6FCA9 DF43C766 0B221322'
    assert next(out).hex() == rand_val2.replace(' ', '').lower()
    # --- personalization string, with additional input --------------------------
    drbg = get_drbg_inst(name='hmac_drbg',
                         entropy=hex2bytes(entropy),
                         method='sha256',
                         nonce=hex2bytes(nonce),
                         additional_data=hex2bytes(personalization_str))
    # first call to generate
    out = drbg.generator(num_bytes=512//8, additional_data=hex2bytes(additional_data1))
    rand_val1 = '1478F29E 94B02CB4 0D3AAB86 245557CE'\
                '13A8CA2F DB657D98 EFC19234 6B9FAC33 EA58ADA2 CCA432CC'\
                'DEFBCDAA 8B82F553 EF966134 E2CD139F 15F01CAD 568565A8'
    assert next(out).hex() == rand_val1.replace(' ', '').lower()
    # second call to generate (original comment wrongly said "first call")
    out = drbg.generator(num_bytes=512//8, additional_data=hex2bytes(additional_data2))
    rand_val2 = '497C7A16 E88A6411 F8FCE10E F56763C6'\
                '1025801D 8F51A743 52D682CC 23A0A8E6 73CAE032 28939064'\
                '7DC683B7 342885D6 B76AB1DA 696D3E97 E22DFFDD FFFD8DF0'
    assert next(out).hex() == rand_val2.replace(' ', '').lower()
    # --- reseed without additional input, no personalization string -------------
    drbg = get_drbg_inst(name='hmac_drbg',
                         entropy=hex2bytes(entropy),
                         method='sha256',
                         nonce=hex2bytes(nonce),
                         additional_data=hex2bytes(additional_data))
    entropy1 = '808182 83848586'\
               '8788898A 8B8C8D8E 8F909192 93949596 9798999A 9B9C9D9E'\
               '9FA0A1A2 A3A4A5A6 A7A8A9AA ABACADAE AFB0B1B2 B3B4B5B6'
    entropy2 = 'C0C1C2 C3C4C5C6'\
               'C7C8C9CA CBCCCDCE CFD0D1D2 D3D4D5D6 D7D8D9DA DBDCDDDE'\
               'DFE0E1E2 E3E4E5E6 E7E8E9EA EBECEDEE EFF0F1F2 F3F4F5F6'
    # first reseed
    drbg.reseed(entropy=hex2bytes(entropy1))
    # generate
    out = drbg.generator(num_bytes=512//8)
    rand_val1 = 'FABD0AE2 5C69DC2E FDEFB7F2 0C5A31B5'\
                '7AC938AB 771AA19B F8F5F146 8F665C93 8C9A1A5D F0628A56'\
                '90F15A1A D8A613F3 1BBD65EE AD5457D5 D26947F2 9FE91AA7'
    assert next(out).hex() == rand_val1.replace(' ', '').lower()
    # second reseed
    drbg.reseed(entropy=hex2bytes(entropy2))
    # generate
    out = drbg.generator(num_bytes=512//8)
    rand_val2 = '6BD925B0 E1C232EF D67CCD84 F722E927'\
                'ECB46AB2 B7400147 77AF14BA 0BBF53A4 5BDBB62B 3F7D0B9C'\
                '8EEAD057 C0EC754E F8B53E60 A1F434F0 5946A8B6 86AFBC7A'
    assert next(out).hex() == rand_val2.replace(' ', '').lower()
    # --- reseed with additional input, no personalization string ----------------
    drbg = get_drbg_inst(name='hmac_drbg',
                         entropy=hex2bytes(entropy),
                         method='sha256',
                         nonce=hex2bytes(nonce),
                         additional_data=hex2bytes(additional_data))
    # first reseed
    drbg.reseed(entropy=hex2bytes(entropy1), additional_data=hex2bytes(additional_data1))
    # generate
    out = drbg.generator(num_bytes=512//8)
    rand_val1 = '085D57AF 6BABCF2B 9AEEF387 D531650E'\
                '6A505C54 406AB37A 52899E0E CAB3632B 7A068A28 14C6DF6A'\
                'E532B658 D0D9741C 84775FEE 45B684CD BDC25FBC B4D8F310'
    assert next(out).hex() == rand_val1.replace(' ', '').lower()
    # second reseed
    drbg.reseed(entropy=hex2bytes(entropy2), additional_data=hex2bytes(additional_data2))
    # generate
    out = drbg.generator(num_bytes=512//8)
    rand_val2 = '9B219FD9 0DE2A08E 493405CF 874417B5'\
                '826770F3 94481555 DC668ACD 96B9A3E5 6F9D2C32 5E26D47C'\
                '1DFCFC8F BF86126F 40A1E639 60F62749 342ECDB7 1B240DC6'
    assert next(out).hex() == rand_val2.replace(' ', '').lower()
    # --- reseed without additional input, with personalization string -----------
    drbg = get_drbg_inst(name='hmac_drbg',
                         entropy=hex2bytes(entropy),
                         method='sha256',
                         nonce=hex2bytes(nonce),
                         additional_data=hex2bytes(personalization_str))
    # first reseed
    drbg.reseed(entropy=hex2bytes(entropy1))
    # generate
    out = drbg.generator(num_bytes=512//8)
    rand_val1 = 'D8B67130 714194FF E5B2A35D BCD5E1A2'\
                '9942AD5C 68F3DEB9 4ADD9E9E BAD86067 EDF04915 FB40C391'\
                'EAE70C65 9EAAE7EF 11A3D46A 5B085EDD 90CC72CE A989210B'
    assert next(out).hex() == rand_val1.replace(' ', '').lower()
    # second reseed
    drbg.reseed(entropy=hex2bytes(entropy2))
    # generate
    out = drbg.generator(num_bytes=512//8)
    rand_val2 = '8BBA71C2 583F2530 C259C907 84A59AC4'\
                '4D1C8056 917CCF38 8788102D 73824C6C 11D5D63B E1F01017'\
                'D884CD69 D9334B9E BC01E7BD 8FDF2A8E 52572293 DC21C0E1'
    assert next(out).hex() == rand_val2.replace(' ', '').lower()
    # --- reseed with additional input, with personalization string --------------
    drbg = get_drbg_inst(name='hmac_drbg',
                         entropy=hex2bytes(entropy),
                         method='sha256',
                         nonce=hex2bytes(nonce),
                         additional_data=hex2bytes(personalization_str))
    # first reseed
    drbg.reseed(entropy=hex2bytes(entropy1), additional_data=hex2bytes(additional_data1))
    # generate
    out = drbg.generator(num_bytes=512//8)
    rand_val1 = '44D78BBC 3EB67C59 C22F6C31 003D212A'\
                '7837CCD8 4C438B55 150FD013 A8A78FE8 EDEA81C6 72E4B8DD'\
                'C8183886 E69C2E17 7DF574C1 F190DF27 1850F8CE 55EF20B8'
    assert next(out).hex() == rand_val1.replace(' ', '').lower()
    # second reseed
    drbg.reseed(entropy=hex2bytes(entropy2), additional_data=hex2bytes(additional_data2))
    # generate
    out = drbg.generator(num_bytes=512//8)
    # BUG FIX: this literal used to end with a stray '\' line continuation,
    # fusing it with the assert below into a SyntaxError.
    rand_val2 = '917780DC 0CE9989F EE6C0806 D6DA123A'\
                '18252947 58D4E1B5 82687231 780A2A9C 33F1D156 CCAD3277'\
                '64B29A4C B2690177 AE96EF9E E92AD0C3 40BA0FD1 203C02C6'
    assert next(out).hex() == rand_val2.replace(' ', '').lower()
if __name__ == "__main__":
test_hmac_drbg_correctness() | 11,782 | 38.673401 | 113 | py |
XFL | XFL-master/test/common/crypto/csprng/test_hmac_drbg.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from common.crypto.csprng.drbg import get_drbg_inst
"""
HMAC_DRBG
Tese cases provide by:
https://csrc.nist.gov/CSRC/media/Projects/Cryptographic-Standards-and-Guidelines/documents/examples/HMAC_DRBG.pdf
Test method=sha256
Ignore sha1, sha224, sha384, sha512
"""
def hex2bytes(a):
    """Turn a space-separated hex string into its byte representation."""
    no_spaces = a.replace(' ', '')
    return bytes.fromhex(no_spaces)
def test_hmac_drbg_correctness():
    """Validate the HMAC_DRBG gen() interface against NIST SHA-256 vectors.

    Exercises the eight scenarios from the NIST HMAC_DRBG example document:
    every combination of {no personalization string, personalization string}
    x {no per-call additional input, additional input} x {no reseed, reseed
    between generate calls}.

    Fix: the last expected-value literal previously ended with a stray
    line-continuation backslash that fused the assignment with the following
    ``assert`` and made the whole module a SyntaxError.
    """
    entropy = '000102 03040506'\
              '0708090A 0B0C0D0E 0F101112 13141516 1718191A 1B1C1D1E'\
              '1F202122 23242526 2728292A 2B2C2D2E 2F303132 33343536'
    nonce = '20212223 24252627'
    additional_data = ''
    # --- no personalization string, no additional input -----------------------
    drbg = get_drbg_inst(name='hmac_drbg',
                         entropy=hex2bytes(entropy),
                         method='sha256',
                         nonce=hex2bytes(nonce),
                         additional_data=hex2bytes(additional_data))
    # first call to generate
    out = drbg.gen(num_bytes=512//8)
    rand_val1 = 'D67B8C17 34F46FA3 F763CF57 C6F9F4F2'\
                'DC1089BD 8BC1F6F0 23950BFC 56176352 08C85012 38AD7A44'\
                '00DEFEE4 6C640B61 AF77C2D1 A3BFAA90 EDE5D207 406E5403'
    assert out.hex() == rand_val1.replace(' ', '').lower()
    # second call to generate
    out = drbg.gen(num_bytes=512//8)
    rand_val2 = '8FDAEC20 F8B42140 7059E358 8920DA7E'\
                'DA9DCE3C F8274DFA 1C59C108 C1D0AA9B 0FA38DA5 C792037C'\
                '4D33CD07 0CA7CD0C 5608DBA8 B8856546 39DE2187 B74CB263'
    assert out.hex() == rand_val2.replace(' ', '').lower()
    additional_data1 = '606162 63646566'\
                       '6768696A 6B6C6D6E 6F707172 73747576 7778797A 7B7C7D7E'\
                       '7F808182 83848586 8788898A 8B8C8D8E 8F909192 93949596'
    additional_data2 = 'A0A1A2 A3A4A5A6'\
                       'A7A8A9AA ABACADAE AFB0B1B2 B3B4B5B6 B7B8B9BA BBBCBDBE'\
                       'BFC0C1C2 C3C4C5C6 C7C8C9CA CBCCCDCE CFD0D1D2 D3D4D5D6'
    # --- no personalization string, with additional input ----------------------
    drbg = get_drbg_inst(name='hmac_drbg',
                         entropy=hex2bytes(entropy),
                         method='sha256',
                         nonce=hex2bytes(nonce),
                         additional_data=hex2bytes(additional_data))
    # first call to generate
    out = drbg.gen(num_bytes=512//8, additional_data=hex2bytes(additional_data1))
    rand_val1 = '41878735 8135419B 93813353 5306176A'\
                'FB251CDD 2BA37988 59B566A0 5CFB1D68 0EA92585 6D5B84D5'\
                '6ADAE870 45A6BA28 D2C908AB 75B7CC41 431FAC59 F38918A3'
    assert out.hex() == rand_val1.replace(' ', '').lower()
    # second call to generate
    out = drbg.gen(num_bytes=512//8, additional_data=hex2bytes(additional_data2))
    rand_val2 = '7C067BDD CA817248 23D64C69 829285BD'\
                'BFF53771 6102C188 2E202250 E0FA5EF3 A384CD34 A20FFD1F'\
                'BC91E0C5 32A8A421 BC4AFE3C D47F2232 3EB4BAE1 A0078981'
    assert out.hex() == rand_val2.replace(' ', '').lower()
    personalization_str = '404142 43444546'\
                          '4748494A 4B4C4D4E 4F505152 53545556 5758595A 5B5C5D5E'\
                          '5F606162 63646566 6768696A 6B6C6D6E 6F707172 73747576'
    # --- personalization string, no additional input ----------------------------
    drbg = get_drbg_inst(name='hmac_drbg',
                         entropy=hex2bytes(entropy),
                         method='sha256',
                         nonce=hex2bytes(nonce),
                         additional_data=hex2bytes(personalization_str))
    # first call to generate
    out = drbg.gen(num_bytes=512//8)
    rand_val1 = '0DD9C855 89F357C3 89D6AF8D E9D734A9'\
                '17C771EF 2D8816B9 82596ED1 2DB45D73 4A626808 35C02FDA'\
                '66B08E1A 369AE218 F26D5210 AD564248 872D7A28 784159C3'
    assert out.hex() == rand_val1.replace(' ', '').lower()
    # second call to generate
    out = drbg.gen(num_bytes=512//8)
    rand_val2 = '46B4F475 6AE715E0 E51681AB 2932DE15'\
                '23BE5D13 BAF0F458 8B11FE37 2FDA37AB E3683173 41BC8BA9'\
                '1FC5D85B 7FB8CA8F BC309A75 8FD6FCA9 DF43C766 0B221322'
    assert out.hex() == rand_val2.replace(' ', '').lower()
    # --- personalization string, with additional input --------------------------
    drbg = get_drbg_inst(name='hmac_drbg',
                         entropy=hex2bytes(entropy),
                         method='sha256',
                         nonce=hex2bytes(nonce),
                         additional_data=hex2bytes(personalization_str))
    # first call to generate
    out = drbg.gen(num_bytes=512//8, additional_data=hex2bytes(additional_data1))
    rand_val1 = '1478F29E 94B02CB4 0D3AAB86 245557CE'\
                '13A8CA2F DB657D98 EFC19234 6B9FAC33 EA58ADA2 CCA432CC'\
                'DEFBCDAA 8B82F553 EF966134 E2CD139F 15F01CAD 568565A8'
    assert out.hex() == rand_val1.replace(' ', '').lower()
    # second call to generate (original comment wrongly said "first call")
    out = drbg.gen(num_bytes=512//8, additional_data=hex2bytes(additional_data2))
    rand_val2 = '497C7A16 E88A6411 F8FCE10E F56763C6'\
                '1025801D 8F51A743 52D682CC 23A0A8E6 73CAE032 28939064'\
                '7DC683B7 342885D6 B76AB1DA 696D3E97 E22DFFDD FFFD8DF0'
    assert out.hex() == rand_val2.replace(' ', '').lower()
    # --- reseed without additional input, no personalization string -------------
    drbg = get_drbg_inst(name='hmac_drbg',
                         entropy=hex2bytes(entropy),
                         method='sha256',
                         nonce=hex2bytes(nonce),
                         additional_data=hex2bytes(additional_data))
    entropy1 = '808182 83848586'\
               '8788898A 8B8C8D8E 8F909192 93949596 9798999A 9B9C9D9E'\
               '9FA0A1A2 A3A4A5A6 A7A8A9AA ABACADAE AFB0B1B2 B3B4B5B6'
    entropy2 = 'C0C1C2 C3C4C5C6'\
               'C7C8C9CA CBCCCDCE CFD0D1D2 D3D4D5D6 D7D8D9DA DBDCDDDE'\
               'DFE0E1E2 E3E4E5E6 E7E8E9EA EBECEDEE EFF0F1F2 F3F4F5F6'
    # first reseed
    drbg.reseed(entropy=hex2bytes(entropy1))
    # generate
    out = drbg.gen(num_bytes=512//8)
    rand_val1 = 'FABD0AE2 5C69DC2E FDEFB7F2 0C5A31B5'\
                '7AC938AB 771AA19B F8F5F146 8F665C93 8C9A1A5D F0628A56'\
                '90F15A1A D8A613F3 1BBD65EE AD5457D5 D26947F2 9FE91AA7'
    assert out.hex() == rand_val1.replace(' ', '').lower()
    # second reseed
    drbg.reseed(entropy=hex2bytes(entropy2))
    # generate
    out = drbg.gen(num_bytes=512//8)
    rand_val2 = '6BD925B0 E1C232EF D67CCD84 F722E927'\
                'ECB46AB2 B7400147 77AF14BA 0BBF53A4 5BDBB62B 3F7D0B9C'\
                '8EEAD057 C0EC754E F8B53E60 A1F434F0 5946A8B6 86AFBC7A'
    assert out.hex() == rand_val2.replace(' ', '').lower()
    # --- reseed with additional input, no personalization string ----------------
    drbg = get_drbg_inst(name='hmac_drbg',
                         entropy=hex2bytes(entropy),
                         method='sha256',
                         nonce=hex2bytes(nonce),
                         additional_data=hex2bytes(additional_data))
    # first reseed
    drbg.reseed(entropy=hex2bytes(entropy1), additional_data=hex2bytes(additional_data1))
    # generate
    out = drbg.gen(num_bytes=512//8)
    rand_val1 = '085D57AF 6BABCF2B 9AEEF387 D531650E'\
                '6A505C54 406AB37A 52899E0E CAB3632B 7A068A28 14C6DF6A'\
                'E532B658 D0D9741C 84775FEE 45B684CD BDC25FBC B4D8F310'
    assert out.hex() == rand_val1.replace(' ', '').lower()
    # second reseed
    drbg.reseed(entropy=hex2bytes(entropy2), additional_data=hex2bytes(additional_data2))
    # generate
    out = drbg.gen(num_bytes=512//8)
    rand_val2 = '9B219FD9 0DE2A08E 493405CF 874417B5'\
                '826770F3 94481555 DC668ACD 96B9A3E5 6F9D2C32 5E26D47C'\
                '1DFCFC8F BF86126F 40A1E639 60F62749 342ECDB7 1B240DC6'
    assert out.hex() == rand_val2.replace(' ', '').lower()
    # --- reseed without additional input, with personalization string -----------
    drbg = get_drbg_inst(name='hmac_drbg',
                         entropy=hex2bytes(entropy),
                         method='sha256',
                         nonce=hex2bytes(nonce),
                         additional_data=hex2bytes(personalization_str))
    # first reseed
    drbg.reseed(entropy=hex2bytes(entropy1))
    # generate
    out = drbg.gen(num_bytes=512//8)
    rand_val1 = 'D8B67130 714194FF E5B2A35D BCD5E1A2'\
                '9942AD5C 68F3DEB9 4ADD9E9E BAD86067 EDF04915 FB40C391'\
                'EAE70C65 9EAAE7EF 11A3D46A 5B085EDD 90CC72CE A989210B'
    assert out.hex() == rand_val1.replace(' ', '').lower()
    # second reseed
    drbg.reseed(entropy=hex2bytes(entropy2))
    # generate
    out = drbg.gen(num_bytes=512//8)
    rand_val2 = '8BBA71C2 583F2530 C259C907 84A59AC4'\
                '4D1C8056 917CCF38 8788102D 73824C6C 11D5D63B E1F01017'\
                'D884CD69 D9334B9E BC01E7BD 8FDF2A8E 52572293 DC21C0E1'
    assert out.hex() == rand_val2.replace(' ', '').lower()
    # --- reseed with additional input, with personalization string --------------
    drbg = get_drbg_inst(name='hmac_drbg',
                         entropy=hex2bytes(entropy),
                         method='sha256',
                         nonce=hex2bytes(nonce),
                         additional_data=hex2bytes(personalization_str))
    # first reseed
    drbg.reseed(entropy=hex2bytes(entropy1), additional_data=hex2bytes(additional_data1))
    # generate
    out = drbg.gen(num_bytes=512//8)
    rand_val1 = '44D78BBC 3EB67C59 C22F6C31 003D212A'\
                '7837CCD8 4C438B55 150FD013 A8A78FE8 EDEA81C6 72E4B8DD'\
                'C8183886 E69C2E17 7DF574C1 F190DF27 1850F8CE 55EF20B8'
    assert out.hex() == rand_val1.replace(' ', '').lower()
    # second reseed
    drbg.reseed(entropy=hex2bytes(entropy2), additional_data=hex2bytes(additional_data2))
    # generate
    out = drbg.gen(num_bytes=512//8)
    # BUG FIX: this literal used to end with a stray '\' line continuation,
    # fusing it with the assert below into a SyntaxError.
    rand_val2 = '917780DC 0CE9989F EE6C0806 D6DA123A'\
                '18252947 58D4E1B5 82687231 780A2A9C 33F1D156 CCAD3277'\
                '64B29A4C B2690177 AE96EF9E E92AD0C3 40BA0FD1 203C02C6'
    assert out.hex() == rand_val2.replace(' ', '').lower()
| 11,525 | 38.204082 | 113 | py |
XFL | XFL-master/test/common/crypto/csprng/__init__.py | 0 | 0 | 0 | py | |
XFL | XFL-master/test/common/fedavg/otp/test_trainer3.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# import json
# import time
# from concurrent import futures
# import grpc
# from common.utils.grpc_channel_options import options
# from common.communication.gRPC.python import trainer_pb2_grpc
# from service.trainer import TrainerService
# from fed_api import Commu
# from fed_api import FedNode
# from fed_api import DataPool
# from fed_api import get_fedavg_trainer_inst
# from random_input import param_torch, param_numpy, weight_factors, sec_conf
# def do_fedavg(id, sec_conf):
# fedavg_trainer = get_fedavg_trainer_inst(sec_conf)
# if 'torch' in sec_conf['data_type']:
# local_weight = param_torch[id-1]
# elif 'numpy' in sec_conf['data_type']:
# local_weight = param_numpy[id-1]
# weight_factor = weight_factors[id-1]
# fedavg_trainer.aggregate(local_weight, weight_factor)
# if __name__ == "__main__":
# id = 'node-3'
# FedNode.init_fednode()
# FedNode.config["node_id"] = str(id)
# FedNode.node_id = str(id)
# server = grpc.server(futures.ThreadPoolExecutor(max_workers=10), options=options)
# trainer_pb2_grpc.add_TrainerServicer_to_server(TrainerService(), server)
# FedNode.add_server(server, "trainer")
# server.start()
# with open("./config/data_pool_config.json") as f:
# data_pool_config = json.load(f)
# DataPool(data_pool_config)
# Commu(FedNode.config)
# time.sleep(5)
# for conf in sec_conf:
# do_fedavg(3, conf)
| 2,072 | 29.485294 | 87 | py |
XFL | XFL-master/test/common/fedavg/otp/test_trainer1.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# import json
# import time
# from concurrent import futures
# import grpc
# from common.utils.grpc_channel_options import options
# from common.communication.gRPC.python import trainer_pb2_grpc
# from service.trainer import TrainerService
# from fed_api import Commu
# from fed_api import FedNode
# from fed_api import DataPool
# from fed_api import get_fedavg_trainer_inst
# from random_input import param_torch, param_numpy, weight_factors, sec_conf
# def do_fedavg(id, sec_conf):
# fedavg_trainer = get_fedavg_trainer_inst(sec_conf)
# if 'torch' in sec_conf['data_type']:
# local_weight = param_torch[id-1]
# elif 'numpy' in sec_conf['data_type']:
# local_weight = param_numpy[id-1]
# weight_factor = weight_factors[id-1]
# fedavg_trainer.aggregate(local_weight, weight_factor)
# if __name__ == "__main__":
# id = 'node-1'
# FedNode.init_fednode()
# FedNode.config["node_id"] = str(id)
# FedNode.node_id = str(id)
# server = grpc.server(futures.ThreadPoolExecutor(max_workers=10), options=options)
# trainer_pb2_grpc.add_TrainerServicer_to_server(TrainerService(), server)
# FedNode.add_server(server, "trainer")
# server.start()
# with open("./config/data_pool_config.json") as f:
# data_pool_config = json.load(f)
# DataPool(data_pool_config)
# Commu(FedNode.config)
# time.sleep(5)
# for conf in sec_conf:
# do_fedavg(1, conf)
| 2,075 | 29.985075 | 87 | py |
XFL | XFL-master/test/common/fedavg/otp/random_input.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# import numpy as np
# import torch
# import random
# from collections import OrderedDict
# seed = 0
# torch.manual_seed(seed)
# # torch.cuda.manual_seed_all(seed)
# np.random.seed(seed)
# random.seed(seed)
# # torch.backends.cudnn.deterministic = True
# def gen_params(dtype: str):
# shape_dict = OrderedDict(
# {
# 'a': (2, 3, 4),
# 'b': (5),
# 'c': ()
# }
# )
# w = OrderedDict()
# for k, v in shape_dict.items():
# if dtype == 'numpy':
# w[k] = np.random.random(v).astype(np.float32) * 2 - 1
# elif dtype == 'torch':
# w[k] = torch.rand(v) * 2 - 1
# return w
# num_trainer = 3
# param_torch = [gen_params('torch') for i in range(num_trainer)]
# param_numpy = [gen_params('numpy') for i in range(num_trainer)]
# weight_factors = [random.random() for i in range(num_trainer)]
# sec_conf = [
# {
# "method": "otp",
# "key_bitlength": 128,
# "data_type": "torch.Tensor",
# "key_exchange": {
# "key_bitlength": 3072,
# "optimized": True
# },
# "csprng": {
# "name": "hmac_drbg",
# "method": "sha512",
# }
# },
# {
# "method": "otp",
# "key_bitlength": 128,
# "data_type": "numpy.ndarray",
# "key_exchange": {
# "key_bitlength": 3072,
# "optimized": True
# },
# "csprng": {
# "name": "hmac_drbg",
# "method": "sha512",
# }
# }
# ]
| 2,199 | 24.882353 | 74 | py |
XFL | XFL-master/test/common/fedavg/otp/test_trainer2.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# import json
# import time
# from concurrent import futures
# import grpc
# from common.utils.grpc_channel_options import options
# from common.communication.gRPC.python import trainer_pb2_grpc
# from service.trainer import TrainerService
# from fed_api import Commu
# from fed_api import FedNode
# from fed_api import DataPool
# from fed_api import get_fedavg_trainer_inst
# from random_input import param_torch, param_numpy, weight_factors, sec_conf
# def do_fedavg(id, sec_conf):
# fedavg_trainer = get_fedavg_trainer_inst(sec_conf)
# if 'torch' in sec_conf['data_type']:
# local_weight = param_torch[id-1]
# elif 'numpy' in sec_conf['data_type']:
# local_weight = param_numpy[id-1]
# weight_factor = weight_factors[id-1]
# fedavg_trainer.aggregate(local_weight, weight_factor)
# if __name__ == "__main__":
# id = 'node-2'
# FedNode.init_fednode()
# FedNode.config["node_id"] = str(id)
# FedNode.node_id = str(id)
# server = grpc.server(futures.ThreadPoolExecutor(max_workers=10), options=options)
# trainer_pb2_grpc.add_TrainerServicer_to_server(TrainerService(), server)
# FedNode.add_server(server, "trainer")
# server.start()
# with open("./config/data_pool_config.json") as f:
# data_pool_config = json.load(f)
# DataPool(data_pool_config)
# Commu(FedNode.config)
# time.sleep(5)
# for conf in sec_conf:
# do_fedavg(2, conf)
| 2,072 | 29.485294 | 87 | py |
XFL | XFL-master/test/common/fedavg/otp/test_scheduler.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# import json
# import time
# from concurrent import futures
# from typing import OrderedDict
# from functools import reduce
# import grpc
# import numpy as np
# import torch
# from common.utils.grpc_channel_options import options
# from common.communication.gRPC.python import scheduler_pb2_grpc
# from service.scheduler import SchedulerService
# from fed_api import Commu
# from fed_api import FedNode
# from fed_api import DataPool
# from fed_api import get_fedavg_scheduler_inst
# from random_input import param_torch, param_numpy, weight_factors, sec_conf
# def almost_equal(a, b):
# for k in a:
# if isinstance(a[k], np.ndarray):
# return np.all(a[k] - b[k] < 1e-4)
# else:
# return torch.all(a[k] - b[k] < 1e-4)
# def do_fedavg(sec_conf):
# fedavg_trainer = get_fedavg_scheduler_inst(sec_conf)
# result = fedavg_trainer.aggregate(weight_factors)
# def f(x, y):
# for k in x:
# x[k] += y[k]
# return x
# if 'torch' in sec_conf["data_type"]:
# param = param_torch
# elif 'numpy' in sec_conf["data_type"]:
# param = param_numpy
# for i, item in enumerate(param):
# for k in item:
# param[i][k] *= weight_factors[i]
# expected_result = reduce(f, param)
# sum_weight_factors = sum(weight_factors)
# for k in expected_result:
# expected_result[k] /= sum_weight_factors
# assert almost_equal(result, expected_result)
# if __name__ == "__main__":
# FedNode.init_fednode(is_scheduler=True)
# FedNode.config["node_id"] = 'scheduler'
# FedNode.node_id = 'scheduler'
# server = grpc.server(futures.ThreadPoolExecutor(max_workers=10), options=options)
# scheduler_pb2_grpc.add_SchedulerServicer_to_server(SchedulerService(), server)
# FedNode.add_server(server, "scheduler")
# server.start()
# with open("./config/data_pool_config.json") as f:
# data_pool_config = json.load(f)
# DataPool(data_pool_config)
# Commu(FedNode.config)
# time.sleep(5)
# for conf in sec_conf:
# do_fedavg(conf)
| 2,756 | 28.967391 | 87 | py |
XFL | XFL-master/test/common/communication/test_channel.py | import pytest
from unittest.mock import patch
from common.communication.gRPC.python.channel import BroadcastChannel, DualChannel, FullConnectedChannel
from common.communication.gRPC.python.commu import Commu
from service.fed_config import FedConfig
class TestFullConnectedChannel():
    """Unit tests for FullConnectedChannel messaging primitives.

    Each test injects the federation topology by assigning the class-level
    attributes ``Commu.scheduler_id`` / ``Commu.trainer_ids`` / ``Commu.node_id``
    and patches ``Commu.send`` / ``Commu.recv``, so no real transport is needed.
    """

    @pytest.mark.parametrize("scheduler_id, trainer_ids, node_id", [
        ("S", ["A", "B"], "A"), ("S", ["A", "B"], "C"), ("S", ["A", "C"], "A"), ("S", ["A"], "A")
    ])
    def test_init(self, scheduler_id, trainer_ids, node_id):
        """Construction fails when the local node or a channel member is unknown."""
        Commu.scheduler_id = scheduler_id
        Commu.trainer_ids = trainer_ids
        Commu.node_id = node_id
        if node_id == "C" or trainer_ids in (["A", "C"], ["A"]):
            with pytest.raises(ValueError):
                chann = FullConnectedChannel(name='full', ids=["A", "B"], job_id='1', auto_offset=True)
        else:
            chann = FullConnectedChannel(name='full', ids=["A", "B"], job_id='1', auto_offset=True)

    @pytest.mark.parametrize("accumulate_offset", [True, False])
    def test_gen_send_key(self, accumulate_offset):
        """Send key has the form job~name~offset~tag~src->dst; offset advances only when accumulating."""
        Commu.scheduler_id = "S"
        Commu.trainer_ids = ["A", "B"]
        Commu.node_id = "A"
        chann = FullConnectedChannel(name='full', ids=["A", "B"], job_id='1', auto_offset=accumulate_offset)
        send_key = chann._gen_send_key(remote_id='B', tag='@', accumulate_offset=accumulate_offset)
        assert send_key == "1~full~0~@~A->B"
        if accumulate_offset:
            assert chann._send_offset == 1
        else:
            assert chann._send_offset == 0

    @pytest.mark.parametrize("accumulate_offset", [True, False])
    def test_gen_recv_key(self, accumulate_offset):
        """Recv key mirrors the send key with the direction reversed (B->A)."""
        Commu.scheduler_id = "S"
        Commu.trainer_ids = ["A", "B"]
        Commu.node_id = "A"
        chann = FullConnectedChannel(name='full', ids=["A", "B"], job_id='1', auto_offset=accumulate_offset)
        send_key = chann._gen_recv_key(remote_id='B', tag='@', accumulate_offset=accumulate_offset)
        assert send_key == "1~full~0~@~B->A"
        if accumulate_offset:
            assert chann._recv_offset == 1
        else:
            assert chann._recv_offset == 0

    def test_send(self, mocker):
        """_send returns the status code produced by the underlying Commu.send."""
        Commu.scheduler_id = "S"
        Commu.trainer_ids = ["A", "B"]
        Commu.node_id = "A"
        mocker.patch.object(Commu, "send", return_value=0)
        chann = FullConnectedChannel(name='full', ids=["A", "B"], job_id='1', auto_offset=True)
        res = chann._send(remote_id="B", value=123)
        assert res == 0

    @pytest.mark.parametrize("wait", [True, False])
    def test_recv(self, wait, mocker):
        """On a None payload, the recv offset advances only in waiting mode."""
        Commu.scheduler_id = "S"
        Commu.trainer_ids = ["A", "B"]
        Commu.node_id = "A"
        chann = FullConnectedChannel(name='full', ids=["A", "B"], job_id='1', auto_offset=True)
        mocker.patch.object(Commu, "recv", return_value=123)
        data = chann._recv(remote_id="B", wait=wait)
        assert data == 123
        assert chann._recv_offset == 1
        chann = FullConnectedChannel(name='full', ids=["A", "B"], job_id='1', auto_offset=True)
        mocker.patch.object(Commu, "recv", return_value=None)
        data = chann._recv(remote_id="B", wait=wait)
        assert data is None
        if wait:
            assert chann._recv_offset == 1
        else:
            assert chann._recv_offset == 0

    def test_swap(self, mocker):
        """_swap returns the peer's value; a non-zero send status raises ValueError."""
        Commu.scheduler_id = "S"
        Commu.trainer_ids = ["A", "B"]
        Commu.node_id = "A"
        chann = FullConnectedChannel(name='full', ids=["A", "B"], job_id='1', auto_offset=True)
        mocker.patch.object(chann, "_recv", return_value=123)
        mocker.patch.object(chann, "_send", return_value=0)
        data = chann._swap(remote_id='B', value=123)
        assert data == 123
        mocker.patch.object(chann, "_send", return_value=1)
        with pytest.raises(ValueError):
            data = chann._swap(remote_id='B', value=123)

    @pytest.mark.parametrize("parallel", [True, False])
    def test_broadcast(self, parallel, mocker):
        """Broadcast succeeds in both serial and parallel modes; send failure raises ConnectionError."""
        with patch('common.communication.gRPC.python.channel.PARALLEL', parallel):
            Commu.scheduler_id = "S"
            Commu.trainer_ids = ["A", "B", "C"]
            Commu.node_id = "A"
            chann = FullConnectedChannel(name='full', ids=["A", "B", "C"], job_id='1', auto_offset=True)
            mocker.patch.object(chann, "_send", return_value=0)
            res = chann._broadcast(remote_ids=["B", "C"], value=123)
            assert res == 0
            assert chann._send_offset == 1
            mocker.patch.object(chann, "_send", return_value=1)
            with pytest.raises(ConnectionError):
                res = chann._broadcast(remote_ids=["B", "C"], value=123)

    @pytest.mark.parametrize("parallel", [True, False])
    def test_scatter(self, parallel, mocker):
        """Scatter distributes one value per remote; send failure raises ConnectionError."""
        with patch('common.communication.gRPC.python.channel.PARALLEL', parallel):
            Commu.scheduler_id = "S"
            Commu.trainer_ids = ["A", "B", "C"]
            Commu.node_id = "A"
            chann = FullConnectedChannel(name='full', ids=["A", "B", "C"], job_id='1', auto_offset=True)
            mocker.patch.object(chann, "_send", return_value=0)
            res = chann._scatter(remote_ids=["B", "C"], values=[123, 123])
            assert res == 0
            assert chann._send_offset == 1
            mocker.patch.object(chann, "_send", return_value=1)
            with pytest.raises(ConnectionError):
                res = chann._scatter(remote_ids=["B", "C"], values=[123, 123])

    @pytest.mark.parametrize("parallel", [True, False])
    def test_collect(self, parallel, mocker):
        # BUGFIX: the first parameter was misspelled `sefl`; it worked only
        # because pytest passes the instance positionally regardless of name.
        with patch('common.communication.gRPC.python.channel.PARALLEL', parallel):
            Commu.scheduler_id = "S"
            Commu.trainer_ids = ["A", "B", "C"]
            Commu.node_id = "A"
            chann = FullConnectedChannel(name='full', ids=["A", "B", "C"], job_id='1', auto_offset=True)
            mocker.patch.object(chann, "_recv", return_value=123)
            data = chann._collect(remote_ids=["B", "C"])
            assert data == [123, 123]
@pytest.mark.parametrize("job_id", ['1', ''])
def test_DualChannel(job_id, mocker):
    """A two-party channel on nodes A/B resolves the peer id and proxies send/recv/swap.

    The empty job_id case exercises the fallback that asks Commu for the job id.
    """
    mocker.patch.object(Commu, "send", return_value=0)
    mocker.patch.object(Commu, "recv", return_value=123)
    # BUGFIX: the keyword was misspelled `returen_value`, which merely set a
    # stray attribute on the mock instead of configuring its return value.
    mocker.patch.object(Commu, "get_job_id", return_value='1')
    Commu.trainer_ids = ["A", "B"]
    Commu.scheduler_id = "S"
    Commu.node_id = "A"
    dual_chann = DualChannel(name='dual', ids=["A", "B"], job_id=job_id, auto_offset=True)
    assert dual_chann.remote_id == "B"
    status = dual_chann.send(value=123, tag='@', use_pickle=True)
    assert status == 0
    data = dual_chann.recv(tag='@', use_pickle=True, wait=True)
    assert data == 123
    data = dual_chann.swap(value=123, tag='@', use_pickle=True)
    assert data == 123
@pytest.mark.parametrize("ids, root_id, job_id", [
    (["A", "B", "C"], "A", '1'), ([], "", "")
])
def test_BroadcastChannel(ids, root_id, job_id, mocker):
    """The root node broadcasts/scatters/collects; a leaf node sends to and
    receives from the root. FedConfig and Commu are patched so the
    empty-parameter case can resolve ids and job id without a live federation.
    """
    mocker.patch.object(Commu, "send", return_value=0)
    mocker.patch.object(Commu, "recv", return_value=123)
    # BUGFIX: the keyword was misspelled `returen_value`; get_job_id now
    # really returns '1' for the empty job_id case.
    mocker.patch.object(Commu, "get_job_id", return_value='1')
    mocker.patch.object(FedConfig, "get_label_trainer", return_value=["A"])
    mocker.patch.object(FedConfig, "get_trainer", return_value=["B", "C"])
    Commu.trainer_ids = ["A", "B", "C"]
    Commu.scheduler_id = "S"
    Commu.node_id = "A"
    chann = BroadcastChannel(name='dual', ids=ids, root_id=root_id, job_id=job_id, auto_offset=True)
    res = chann.broadcast(value=123)
    assert res == 0
    assert set(chann.remote_ids) == set(["B", "C"])
    res = chann.scatter(values=[123, 123])
    assert res == 0
    res = chann.collect()
    assert res == [123, 123]
    Commu.node_id = "B"
    chann = BroadcastChannel(name='dual', ids=ids, root_id=root_id, job_id=job_id, auto_offset=True)
    res = chann.send(value=123)
    assert res == 0
    res = chann.recv()
    assert res == 123
XFL | XFL-master/test/common/communication/test_commu.py | # from pytest_mock_resources import create_redis_fixture
# from common.communication.gRPC.python.commu import Commu
# # pip install "pytest-mock-resources"
# def test_commu():
# fed_info = {
# "scheduler": "1",
# "trainer": {
# "2": "",
# "3": ""
# },
# "node_id": "2"
# }
# Commu() | 353 | 21.125 | 58 | py |
XFL | XFL-master/test/common/storage/test_redis_conn.py | from redis.client import StrictRedis
from common.storage.redis.redis_conn import RedisConn
class TestRedisConn:
    """Tests for RedisConn with the underlying StrictRedis client faked by an
    in-memory dict, so no Redis server is required."""
    def test_redis_conn(self, mocker):
        """init_redis runs with init_job_id stubbed out."""
        rc = RedisConn()
        mocker.patch.object(
            RedisConn, "init_job_id"
        )
        rc.init_redis()
    def test_redis_func(self, mocker):
        """Exercise set/put/get/incr/cut against dict-backed StrictRedis mocks."""
        rc = RedisConn()
        # Backing store shared by all the side-effect closures below.
        d = dict()
        def mock_set(key, value, *args, **kwargs):
            d[key] = value
        def mock_get(key, *args, **kwargs):
            return d.get(key)
        def mock_incr(key, *args, **kwargs):
            d[key] = int(d.get(key, 0)) + 1
        def mock_del(key, *args, **kwargs):
            del d[key]
        def mock_exists(key, *args, **kwargs):
            return key in d
        mocker.patch.object(
            StrictRedis, "set", side_effect=mock_set
        )
        mocker.patch.object(
            StrictRedis, "get", side_effect=mock_get
        )
        mocker.patch.object(
            StrictRedis, "incr", side_effect=mock_incr
        )
        mocker.patch.object(
            StrictRedis, "delete", side_effect=mock_del
        )
        mocker.patch.object(
            StrictRedis, "exists", side_effect=mock_exists
        )
        # cut_if_exist on a missing key must be a no-op (no KeyError).
        rc.cut_if_exist("what?")
        rc.init_job_id()
        rc.cut_if_exist("XFL_JOB_ID")
        rc.set("XFL_JOB_ID", "1", ex=1)
        rc.put("XFL_JOB_ID", "100")
        assert rc.get("XFL_JOB_ID") == "100"
        rc.set("XFL_JOB_ID", "1")
        assert rc.get("XFL_JOB_ID") == "1"
        rc.incr("XFL_JOB_ID")
        rc.cut("XFL_JOB_ID")
| 1,581 | 24.111111 | 58 | py |
XFL | XFL-master/test/common/checker/test_checker.py | import pytest
from common.checker.checker import check, cal_num_valid, find_key_matched
from common.checker.x_types import String, Bool, Integer, Float, Any, All
from common.checker.qualifiers import OneOf, SomeOf, RepeatableSomeOf, Required, Optional
def test_dict():
    """A scalar config value must not satisfy a rule expecting a nested dict."""
    passed, total = cal_num_valid(check({"a": 1}, {"a": {}}))
    assert passed < total
def test_list():
    """List-valued rules: Optional / SomeOf / OneOf / Any / All element qualifiers."""
    # A dict value does not satisfy a rule expecting a list.
    config = {"a": {}}
    rule = {"a": []}
    r = check(config, rule)
    num_valid, num_total = cal_num_valid(r)
    assert num_valid < num_total
    # Optional(Integer()) accepts a single int element and an empty list.
    config = {"a": [5]}
    rule = {
        "a": [Optional(Integer())]
    }
    r = check(config, rule)
    num_valid, num_total = cal_num_valid(r)
    assert num_valid == num_total
    config = {"a": []}
    r = check(config, rule)
    num_valid, num_total = cal_num_valid(r)
    assert num_valid == num_total
    # Optional
    # Optional with more than one candidate is rejected at check time.
    rule = {"a": [Optional(3, 5)]}
    with pytest.raises(ValueError):
        r = check(config, rule)
    # Optional(SomeOf(...)): all elements must come from the candidate pool.
    config = {"a": [7, 3]}
    rule = {"a": [Optional(SomeOf(3, 5, 7))]}
    r = check(config, rule)
    num_valid, num_total = cal_num_valid(r)
    assert num_valid == num_total
    config = {"a": [7, 8]}
    r = check(config, rule)
    num_valid, num_total = cal_num_valid(r)
    assert num_valid < num_total
    # Any
    # [Any()] expects exactly one scalar element; [7, 8] has two.
    rule = {"a": [Any()]}
    r = check(config, rule)
    num_valid, num_total = cal_num_valid(r)
    assert num_valid < num_total
    config = {"a": [9]}
    r = check(config, rule)
    num_valid, num_total = cal_num_valid(r)
    assert num_valid == num_total
    # Any() rejects a nested list element; All() accepts it.
    config = {"a": [[7, 8]]}
    r = check(config, rule)
    num_valid, num_total = cal_num_valid(r)
    assert num_valid < num_total
    rule = {"a": [All()]}
    r = check(config, rule)
    num_valid, num_total = cal_num_valid(r)
    assert num_valid == num_total
    # SomeOf does not allow repeated elements (1 appears twice).
    config = {"a": [1, 1, 2]}
    rule = {"a": [SomeOf(1, 2)]}
    r = check(config, rule)
    num_valid, num_total = cal_num_valid(r)
    assert num_valid < num_total
    # Mixed qualifier/literal rule: [OneOf(1, 3), 2] cannot match [1, 1, 2].
    rule = {"a": [OneOf(1, 3), 2]}
    r = check(config, rule)
    num_valid, num_total = cal_num_valid(r)
    assert num_valid < num_total
def test_rest():
    """find_key_matched with no candidate, and pattern-typed rule keys."""
    # "a" matches neither "b" nor "c".
    assert find_key_matched("a", ["b", "c"]) is None

    # Config with the literal key "b" missing does not fully validate.
    report = check({"a": [1]}, {Integer(): [Integer()], "b": [1]})
    ok, total = cal_num_valid(report)
    assert ok < total

    # A String() key pattern covers "a"; the nested int list validates fully.
    report = check(
        {"a": {"b": [1, 2, 3]}},
        {String(): {"b": [RepeatableSomeOf(Integer())]}},
    )
    report.result()
    ok, total = cal_num_valid(report)
    assert ok == total
XFL | XFL-master/test/common/checker/test_matcher.py | import pytest
from common.checker.checker import check, cal_num_valid, find_key_matched
from common.checker.x_types import String, Bool, Integer, Float, Any, All
from common.checker.qualifiers import OneOf, SomeOf, RepeatableSomeOf, Required, Optional
from common.checker.matcher import get_matched_config
def test_matcher():
    """get_matched_config keeps only config entries covered by the rule:
    matched keys retain their (sub)values, rule keys of a mismatching type
    map to None, and rule keys absent from the config are dropped.
    """
    config = {
        "a": [1, 2, 3],
        "b": {
            "m": [1, 2],
            "n": 4
        },
        "d": [3, 4],
        "e": [
            [1, 2]
        ]
    }
    rule = {
        "a": All(),
        "b": {
            "n": Any()
        },
        "c": All(),
        "d": Integer(),
        "f": All()
    }
    result = get_matched_config(config, rule)
    # BUGFIX: removed leftover debug output `print(result, "-----")`.
    assert result == {
        "a": [1, 2, 3],
        "b": {
            "n": 4
        },
        "d": None
    }
# def test_dict():
# config = {"a": 1}
# rule = {"a": {}}
# r = check(config, rule)
# num_valid, num_total = cal_num_valid(r)
# assert num_valid < num_total
# def test_list():
# config = {"a": {}}
# rule = {"a": []}
# r = check(config, rule)
# num_valid, num_total = cal_num_valid(r)
# assert num_valid < num_total
# config = {"a": [5]}
# rule = {
# "a": [Optional(Integer())]
# }
# r = check(config, rule)
# num_valid, num_total = cal_num_valid(r)
# assert num_valid == num_total
# config = {"a": []}
# r = check(config, rule)
# num_valid, num_total = cal_num_valid(r)
# assert num_valid == num_total
# # Optional
# rule = {"a": [Optional(3, 5)]}
# with pytest.raises(ValueError):
# r = check(config, rule)
# config = {"a": [7, 3]}
# rule = {"a": [Optional(SomeOf(3, 5, 7))]}
# r = check(config, rule)
# num_valid, num_total = cal_num_valid(r)
# assert num_valid == num_total
# config = {"a": [7, 8]}
# r = check(config, rule)
# num_valid, num_total = cal_num_valid(r)
# assert num_valid < num_total
# # Any
# rule = {"a": [Any()]}
# r = check(config, rule)
# num_valid, num_total = cal_num_valid(r)
# assert num_valid < num_total
# config = {"a": [9]}
# r = check(config, rule)
# num_valid, num_total = cal_num_valid(r)
# assert num_valid == num_total
# config = {"a": [[7, 8]]}
# r = check(config, rule)
# num_valid, num_total = cal_num_valid(r)
# assert num_valid < num_total
# rule = {"a": [All()]}
# r = check(config, rule)
# num_valid, num_total = cal_num_valid(r)
# assert num_valid == num_total
# config = {"a": [1, 1, 2]}
# rule = {"a": [SomeOf(1, 2)]}
# r = check(config, rule)
# num_valid, num_total = cal_num_valid(r)
# assert num_valid < num_total
# rule = {"a": [OneOf(1, 3), 2]}
# r = check(config, rule)
# num_valid, num_total = cal_num_valid(r)
# assert num_valid < num_total
# def test_rest():
# key = "a"
# dst_keys = ["b", "c"]
# assert find_key_matched(key, dst_keys) is None
# config = {"a": [1]}
# rule = {Integer(): [Integer()], "b": [1]}
# r = check(config, rule)
# num_valid, num_total = cal_num_valid(r)
# assert num_valid < num_total
# config = {"a": {
# "b": [1, 2, 3]
# }}
# rule = {String(): {
# "b": [RepeatableSomeOf(Integer())]
# }}
# r = check(config, rule)
# r.result()
# num_valid, num_total = cal_num_valid(r)
# assert num_valid == num_total
| 3,579 | 23.689655 | 89 | py |
XFL | XFL-master/test/common/checker/test_types.py | import pytest
from common.checker.checker import check, cal_num_valid
from common.checker.x_types import String, Bool, Integer, Float, Any, All
from common.checker.qualifiers import OneOf, SomeOf, RepeatableSomeOf, Required, Optional
# (config, rule) fixture pairs for test_types: each config value is accepted by
# the correspondingly-keyed X type, and each typed rule carries a default value.
type_pairs = [
    (
        {
            "a": 1,
            "b": 'b',
            "c": 3.5,
            "d": True,
            "e": 43432.34124,
            "f": [343, "1232", {}]
        },
        {
            "a": Integer(4),
            "b": String("abc"),
            "c": Float(1.0),
            "d": Bool(False),
            "e": Any(),
            "f": All()
        })
    ]
@pytest.mark.parametrize("config, rule", type_pairs)
def test_types(config, rule):
    """Default values of the X types, plus type-mismatch rejections."""
    # Constructor arguments become the rule defaults; Any/All default to None.
    assert rule["a"].default == 4
    assert rule["b"].default == "abc"
    assert rule["c"].default == 1.0
    assert rule["d"].default is False
    assert rule["e"].default is None
    assert rule["f"].default is None
    r = check(config, rule)
    num_valid, num_total = cal_num_valid(r)
    assert num_valid == num_total
    # A string value fails every numeric/boolean type check.
    config = {"a": "a"}
    rules = [
        {"a": Integer()},
        {"a": Float()},
        {"a": Bool()}
    ]
    for rule in rules:
        r = check(config, rule)
        num_valid, num_total = cal_num_valid(r)
        assert num_valid < num_total
    # And an int fails a String rule.
    config = {"a": 1}
    rule = {"a": String()}
    r = check(config, rule)
    num_valid, num_total = cal_num_valid(r)
    assert num_valid < num_total
    # Any() rejects a dict element inside a list.
    config = {"a": [{}]}
    rule = {"a": [Any()]}
    r = check(config, rule)
    num_valid, num_total = cal_num_valid(r)
    assert num_valid < num_total
def test_types_method():
    """Comparator helpers (ge/gt/le/lt) and add_rule predicates on the X types."""
    config = {"a": 3}
    rules = [
        {"a": Integer().ge(3).le(5)},   # 3 in [3, 5]  -> valid
        {"a": Integer().gt(1).lt(5)},   # 3 in (1, 5)  -> valid
        {"a": Integer().ge(4)},         # 3 < 4        -> invalid
        {"a": Integer().gt(4).lt(2)}    # empty range  -> invalid
    ]
    for i, rule in enumerate(rules):
        r = check(config, rule)
        num_valid, num_total = cal_num_valid(r)
        if i in [0, 1]:
            assert num_valid == num_total
        else:
            # BUGFIX: the original condition was `elif i in [3, 4]`, so the
            # i == 2 case was never asserted (and i == 4 can never occur).
            assert num_valid < num_total
    config = {"a": 3}
    rules = [
        {"a": Float().ge(3).le(5)},
        {"a": Float().gt(1).lt(5)},
        {"a": Float().ge(4)},
        {"a": Float().gt(4).lt(2)}
    ]
    for i, rule in enumerate(rules):
        r = check(config, rule)
        num_valid, num_total = cal_num_valid(r)
        if i in [0, 1]:
            assert num_valid == num_total
        else:
            # BUGFIX: same off-by-one index set as above.
            assert num_valid < num_total
    # Custom predicate on String: first character must be 'a'.
    config = {"a": "324342"}
    rule = {"a": String().add_rule(lambda x: x[0] == 'a')}
    r = check(config, rule)
    num_valid, num_total = cal_num_valid(r)
    assert num_valid < num_total
    # A String() key pattern matches the literal key "a".
    config = {"a": {}}
    rule = {String(): {}}
    r = check(config, rule)
    num_valid, num_total = cal_num_valid(r)
    assert num_valid == num_total
    # Custom predicate on Bool comparing against a captured variable.
    config = {"a": True}
    m = False
    rule = {"a": Bool().add_rule(lambda x: x == m)}
    r = check(config, rule)
    num_valid, num_total = cal_num_valid(r)
    assert num_valid < num_total
    # Any()/All() as key patterns both match a dict key.
    config = {"a": {}}
    rule = {Any(): {}}
    r = check(config, rule)
    num_valid, num_total = cal_num_valid(r)
    assert num_valid == num_total
    rule = {All(): {}}
    r = check(config, rule)
    num_valid, num_total = cal_num_valid(r)
    assert num_valid == num_total
    # As a value rule, Any() rejects a dict while All() accepts it.
    config = {"a": {}}
    rule = {"a": Any()}
    r = check(config, rule)
    num_valid, num_total = cal_num_valid(r)
    assert num_valid < num_total
    rule = {"a": All()}
    r = check(config, rule)
    num_valid, num_total = cal_num_valid(r)
    assert num_valid == num_total
    # An empty dict is falsy, so the identity predicate fails.
    rule = {"a": All().add_rule(lambda x: x)}
    r = check(config, rule)
    num_valid, num_total = cal_num_valid(r)
    assert num_valid < num_total
| 3,885 | 25.080537 | 89 | py |
XFL | XFL-master/test/common/checker/test_descriptor.py | import importlib
import json
import os
from pathlib import Path
from common.checker.compare import compare
def test_local_operator():
    """Every local-operator demo config must fully satisfy its descriptor rule."""
    path = Path(Path(__file__).parent.parent.parent.parent) / 'demo' / 'local'
    for operator in os.listdir(path):
        conf_path = path / operator / '1party' / 'config' / 'trainer_config_node-1.json'
        # BUGFIX: `json.load(open(...))` leaked the file handle; use a context manager.
        with open(conf_path, 'r') as fp:
            conf = json.load(fp)[0]
        module_path = '.'.join(['algorithm.config_descriptor', 'local_' + operator, 'label_trainer'])
        module = importlib.import_module(module_path)
        rule = getattr(module, 'local_' + operator + '_rule')
        result, itemized_result, rule_passed, rule_checked = compare(conf, rule)
        assert rule_passed == rule_checked
def test_vertical_operator():
    """Every vertical-operator demo config (all party counts, all roles) must
    fully satisfy the matching role-specific descriptor rule."""
    path = Path(Path(__file__).parent.parent.parent.parent) / 'demo' / 'vertical'
    for operator in os.listdir(path):
        for party_num in os.listdir(path / operator):
            for conf_file in os.listdir(path / operator / party_num / 'config'):
                if 'trainer_config' not in conf_file:
                    continue
                conf_path = path / operator / party_num / 'config' / conf_file
                # BUGFIX: `json.load(open(...))` leaked the file handle.
                with open(conf_path, 'r') as fp:
                    stages = json.load(fp)
                # feature_selection demos keep the operator config in the last stage.
                if operator == 'feature_selection':
                    conf = stages[-1]
                else:
                    conf = stages[0]
                # Role is encoded in the config file name.
                if 'node-1' in conf_file:
                    role = 'label_trainer'
                elif 'assist_trainer' in conf_file:
                    role = 'assist_trainer'
                else:
                    role = 'trainer'
                module_path = '.'.join(['algorithm.config_descriptor', 'vertical_' + operator, role])
                module = importlib.import_module(module_path)
                rule = getattr(module, 'vertical_' + operator + '_' + role + '_rule')
                result, itemized_result, rule_passed, rule_checked = compare(conf, rule)
                # (removed leftover debug `print(result)`)
                assert rule_passed == rule_checked
def test_horizontal_operator():
    """Selected horizontal-operator demo configs must fully satisfy their
    role-specific descriptor rules."""
    path = Path(Path(__file__).parent.parent.parent.parent) / 'demo' / 'horizontal'
    for operator in os.listdir(path):
        # Only these operators ship descriptor rules covered by this test.
        if operator not in ['logistic_regression', "poisson_regression", "kmeans", "linear_regression", "binning_woe_iv"]:
            continue
        for party_num in os.listdir(path / operator):
            if not os.path.isdir(path / operator / party_num):
                continue
            for conf_file in os.listdir(path / operator / party_num / 'config'):
                if 'trainer_config' not in conf_file:
                    continue
                conf_path = path / operator / party_num / 'config' / conf_file
                # BUGFIX: `json.load(open(...))` leaked the file handle; also
                # removed leftover debug prints ("AAAAAA" etc.).
                with open(conf_path, 'r') as fp:
                    conf = json.load(fp)[0]
                if 'assist_trainer' in conf_file:
                    role = 'assist_trainer'
                else:
                    role = 'label_trainer'
                module_path = '.'.join(['algorithm.config_descriptor', 'horizontal_' + operator, role])
                module = importlib.import_module(module_path)
                rule = getattr(module, 'horizontal_' + operator + '_' + role + '_rule')
                result, itemized_result, rule_passed, rule_checked = compare(conf, rule)
                assert rule_passed == rule_checked
| 3,802 | 39.892473 | 137 | py |
XFL | XFL-master/test/common/checker/test_qualifiers.py | import pytest
from common.checker.checker import check, cal_num_valid
from common.checker.x_types import String, Bool, Integer, Float, Any, All
from common.checker.qualifiers import OneOf, SomeOf, RepeatableSomeOf, Required, Optional
def test_OneOf():
    """OneOf as a value rule, as a "__rule__" key selector, and with add_rule
    predicates of one (value) or two (value, config) arguments."""
    config = {"a": 1}
    rule = {"a": OneOf(1, 2).set_default(1)}
    r = check(config, rule)
    num_valid, num_total = cal_num_valid(r)
    assert num_valid == num_total
    assert rule["a"].default == 1
    # "__rule__": exactly one of the listed keys must be present.
    rule = {
        "__rule__": OneOf("a", "b").set_default_index(0),
        "a": Integer()
    }
    r = check(config, rule)
    num_valid, num_total = cal_num_valid(r)
    assert num_valid == num_total
    # set_default_index(0) resolves the default to the first candidate, "a".
    assert rule["__rule__"][0].default == "a"
    # Key "c" is neither declared nor one of the OneOf candidates.
    config = {"c": 3.4}
    r = check(config, rule)
    num_valid, num_total = cal_num_valid(r)
    assert num_valid < num_total
    # One-argument predicate closing over the config: 3 < config["a"][1] (= 2) fails.
    config = {"a": [1, 2, 3]}
    rule = {
        "a": [OneOf(1, 2, 3), OneOf(1, 2, 3), OneOf(1, 2, 3).add_rule(lambda x: x < config["a"][1])]
    }
    r = check(config, rule)
    num_valid, num_total = cal_num_valid(r)
    assert num_valid < num_total
    # Two-argument predicate receiving the whole config as the second argument.
    config = {"a": [1, 2, 3]}
    rule = {
        "a": [OneOf(1, 2, 3), OneOf(1, 2, 3), OneOf(1, 2, 3).add_rule(lambda x, y: x < y["a"][1])]
    }
    r = check(config, rule)
    num_valid, num_total = cal_num_valid(r)
    assert num_valid < num_total
    # A predicate that raises (KeyError on "b") must count as a failed rule, not crash.
    config = {"a": [1, 2, 3]}
    rule = {
        "a": [OneOf(1, 2, 3), OneOf(1, 2, 3), OneOf(1, 2, 3).add_rule(lambda x, y: x < y["b"][1])]
    }
    r = check(config, rule)
    num_valid, num_total = cal_num_valid(r)
    assert num_valid < num_total
def test_SomeOf():
    """SomeOf as a list-element qualifier and as a "__rule__" key selector."""
    # Every element of [1, 2] is drawn from the candidate pool {1, 2, 3}.
    config = {"a": [1, 2]}
    rule = {"a": [SomeOf(1, 2, 3).set_default([2, 3])]}
    r = check(config, rule)
    num_valid, num_total = cal_num_valid(r)
    assert num_valid == num_total
    assert rule["a"][0].default == [2, 3]
    # 1 is not among the candidates {3, 4}.
    rule = {"a": [SomeOf(3, 4)]}
    r = check(config, rule)
    num_valid, num_total = cal_num_valid(r)
    assert num_valid < num_total
    # "__rule__": the present keys must be a subset of the SomeOf candidates.
    config = {"a": 3, "b": 5}
    rule = {
        "__rule__": SomeOf("a", "b", "c").set_default_indices(1, 2),
        "a": Integer(),
        "b": Float()
    }
    r = check(config, rule)
    num_valid, num_total = cal_num_valid(r)
    assert num_valid == num_total
    # set_default_indices(1, 2) selects candidates "b" and "c" as defaults.
    assert rule["__rule__"][0].default == ["b", "c"]
    # Key "a" from the config is not covered by SomeOf("b", "c").
    rule = {
        "__rule__": SomeOf("b", "c"),
        "b": Integer(),
        "c": Float()
    }
    r = check(config, rule)
    num_valid, num_total = cal_num_valid(r)
    assert num_valid < num_total
def test_RepeatableSomeOf():
    """RepeatableSomeOf allows repeated picks from its candidate pool."""
    cfg = {"a": [1, 2, 2]}

    # [1, 2, 2] is fully covered by candidates {1, 2, 3}; defaults stick.
    spec = {"a": [RepeatableSomeOf(1, 2, 3).set_default([2, 2, 3])]}
    ok, total = cal_num_valid(check(cfg, spec))
    assert ok == total
    assert spec["a"][0].default == [2, 2, 3]

    # 1 is not a candidate of {2, 3}; set_default_indices maps to [3, 2, 3].
    spec = {"a": [RepeatableSomeOf(2, 3).set_default_indices(1, 0, 1)]}
    ok, total = cal_num_valid(check(cfg, spec))
    assert ok < total
    assert spec["a"][0].default == [3, 2, 3]

    # Neither 1 nor 2 is a candidate of {3, 4}.
    spec = {"a": [RepeatableSomeOf(3, 4)]}
    ok, total = cal_num_valid(check(cfg, spec))
    assert ok < total
def test_Required():
    """Required(...) in "__rule__": the test expects full validity only when the
    required keys line up exactly with the declared rule keys."""
    cfg = {"a": 3, "b": 4}

    def counts(spec):
        # Fold the validation report down to (passed, total) rule counts.
        return cal_num_valid(check(cfg, spec))

    # Both required keys present and declared.
    ok, total = counts({
        "__rule__": Required("a", "b"),
        "a": Integer(),
        "b": Float()
    })
    assert ok == total

    # Required key "c" is absent from the config.
    ok, total = counts({
        "__rule__": Required("a", "b", "c"),
        "a": Integer(),
        "b": Float()
    })
    assert ok < total

    # Config key "a" has no declaration in the rule.
    ok, total = counts({
        "__rule__": Required("b"),
        "b": Float()
    })
    assert ok < total
def test_Optional():
    """Optional(2) accepts both None and the concrete value 2."""
    spec = {"a": Optional(2).set_default_not_none()}

    ok, total = cal_num_valid(check({"a": None}, spec))
    assert ok == total
    # set_default_not_none resolves the default to the concrete candidate.
    assert spec["a"].default == 2

    ok, total = cal_num_valid(check({"a": 2}, spec))
    assert ok == total
XFL | XFL-master/test/common/utils/test_model_preserver.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from common.utils.model_preserver import ModelPreserver, os, torch
class TestModelPreserver():
    """Tests for ModelPreserver.save/load with os.makedirs and torch patched out."""
    @pytest.mark.parametrize('final,model_path', [(True, "test_save_dir/test.model.pth"), (False, "test_save_dir/test.model_epoch_10.pth")])
    def test_save(self, mocker, final, model_path):
        """save() creates the directory and persists {state_dict, suggest_threshold};
        final=True keeps the plain file name, final=False appends `_epoch_<n>`."""
        mocker.patch("os.makedirs")
        mocker.patch("torch.save")
        ModelPreserver.save("test_save_dir","test.model.pth", {}, epoch=10, final=final, suggest_threshold=0.1)
        os.makedirs.assert_called_once_with("test_save_dir")
        torch.save.assert_called_once_with({"state_dict":{},"suggest_threshold":0.1}, model_path)
    def test_load(self, mocker):
        """load() delegates straight to torch.load with the given path."""
        mocker.patch("torch.load")
        ModelPreserver.load("test_path")
        torch.load.assert_called_once_with("test_path")
| 1,425 | 35.564103 | 140 | py |
XFL | XFL-master/test/common/utils/test_data_utils.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import numpy as np
import pandas as pd
import os
import pathlib
import gzip
import zipfile
import tarfile
import shutil
import urllib.request
from common.utils.data_utils import download_and_extract_data, pd_train_test_split, cal_md5
# Request path registered on the local `httpserver` fixture when stubbing the
# download endpoint in test_download_and_extract_data.
url = "/"
def prepare_file():
    """Write the raw fixture file consumed by the download and md5 tests."""
    pathlib.Path("/opt/dataset/unit_test/test_raw.txt").write_text("unit_test")
@pytest.fixture(scope="module", autouse=True)
def env():
    """Create the dataset directory and fixture file before this module's tests
    run, and remove the whole directory afterwards."""
    if not os.path.exists("/opt/dataset/unit_test"):
        os.makedirs("/opt/dataset/unit_test")
    prepare_file()
    yield
    if os.path.exists("/opt/dataset/unit_test"):
        shutil.rmtree("/opt/dataset/unit_test")
@pytest.mark.parametrize('ftype, dst_file, data_folder', [("gz", "/opt/dataset/unit_test/test.txt.gz", None),("gz", "/opt/dataset/unit_test/test.txt.gz","/opt/dataset/unit_test/")])
def test_download_and_extract_data(httpserver, ftype, dst_file, data_folder):
    """Serve a gzipped fixture from a local HTTP stub, download and extract it,
    and verify the extracted content — with and without an explicit data_folder."""
    src_file = None
    if ftype == "gz":
        src_file = "/opt/dataset/unit_test/test_raw.txt.gz"
        # BUGFIX: the original opened/closed these handles manually and would
        # leak them on error; context managers guarantee closure.
        with open("/opt/dataset/unit_test/test_raw.txt", 'rb') as f_ungz, \
                gzip.open(src_file, 'wb') as f_gz:
            f_gz.writelines(f_ungz)
    to_path = None
    # BUGFIX: `content_f` was never closed.
    with open(src_file, "rb") as content_f:
        content = content_f.read()
    httpserver.expect_request(url).respond_with_data(content)
    download_and_extract_data(httpserver.url_for(url), None, dst_file, data_folder=data_folder, to_path=to_path)
    if ftype == "gz":
        with open("/opt/dataset/unit_test/test.txt","r") as f:
            assert f.readline() == "unit_test"
def test_cal_md5():
    # MD5 of the fixture file written by prepare_file() via the autouse env fixture.
    assert cal_md5("/opt/dataset/unit_test/test_raw.txt") == "d16f7309f3bfab471bad7a55b919f044"
def test_pd_train_test_split():
    """A 0.3 test fraction of a 100-row frame yields a 70/30 split."""
    rows = 100
    frame = pd.DataFrame({
        'x0': np.arange(rows),
        'x1': np.arange(rows),
        'x2': 2 * np.arange(rows) - 40.0,
        'x3': 3 * np.arange(rows) + 1.0,
        'x4': np.arange(rows)[::-1]
    })
    train_part, test_part = pd_train_test_split(frame, 0.3)
    assert len(train_part) == 70
    assert len(test_part) == 30
XFL | XFL-master/test/common/utils/test_config.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from common.utils.config import parse_config, refill_config
def test_parse_config(mocker):
    """parse_config flattens the nested endpoint JSON into per-role host/port
    entries; a `grpcs://` URL maps to use_tls=True, a bare host:port to False."""
    json_str = json.dumps(
        {
            "nodes": {
                "trainer": {
                    "endpoints": [
                        {
                            "fuwuEndpointId": "trainer-endpoint-2",
                            "url": "grpcs://localhost:56002",
                        }
                    ],
                    "name": "follower"
                },
                "label_trainer": {
                    "endpoints": [
                        {
                            "fuwuEndpointId": "assist-trainer-endpoint-1",
                            "url": "localhost:56001",
                        },
                        {
                            "fuwuEndpointId": "scheduler-endpoint-1",
                            "url": "localhost:55001",
                        }
                    ],
                    "name": "master"
                }
            }
        }
    )
    config = parse_config(json_str)
    assert config == {'scheduler': {'node_id': 'label_trainer', 'host': 'localhost', 'port': '55001', 'use_tls': False, 'name': 'master'}, 'trainer': {'trainer': {
        'host': 'localhost', 'port': '56002', 'use_tls': True, 'name': 'follower'}, 'assist_trainer': {'host': 'localhost', 'port': '56001', 'use_tls': False, 'name': 'master'}}}
def test_refill_config():
    """Top-level keys missing from the custom config are filled from the defaults;
    nested defaults win over an empty custom sub-dict."""
    custom = {"1": {}}
    defaults = {"2": 2, "1": {"3": 3}}
    merged = refill_config(custom, defaults)
    assert merged == {"2": 2, "1": {"3": 3}}
| 2,104 | 32.412698 | 178 | py |
XFL | XFL-master/test/common/utils/test_config_parser.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from common.utils.config_parser import replace_variable
def test_replace_variable():
    """[JOB_ID] / [NODE_ID] / [STAGE_ID] placeholders are substituted throughout
    nested dicts and lists; [STAGE_ID-1] evaluates the stage-index arithmetic."""
    template = {
        "path": "/opt/checkpoints/[JOB_ID]/[NODE_ID]",
        "model": {
            "name": "vertical_xgboost_[STAGE_ID].json"
        },
        "x": ["x_[STAGE_ID-1]"]
    }
    # Argument order: (output, stage_id, job_id, node_id).
    resolved = replace_variable(template, "2", "001", "a")
    assert resolved == {
        "path": "/opt/checkpoints/001/a",
        "model": {
            "name": "vertical_xgboost_2.json"
        },
        "x": ["x_1"]
    }
XFL | XFL-master/test/common/utils/test_logger.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
from common.utils.logger import add_job_log_handler, add_job_stage_log_handler, remove_log_handler, logger
# NOTE(review): not referenced by the tests in this module — presumably kept to
# document the log root the handlers write under; confirm before removing.
LOG_PATH = "/opt/log"
def test_add_job_log_handler():
    """The job handler writes to /opt/log/<name>/<id>/xfl.log and receives
    records emitted through the shared logger."""
    job_handler = add_job_log_handler("unit_test", 1)
    logger.info("add_job_log_handler")
    assert job_handler.baseFilename == "/opt/log/unit_test/1/xfl.log"
    assert os.path.exists("/opt/log/unit_test/1/xfl.log")
    # The logged message is the last whitespace-separated token of the line.
    with open("/opt/log/unit_test/1/xfl.log") as f:
        assert f.readline().split()[-1] == "add_job_log_handler"
    shutil.rmtree("/opt/log/unit_test/1")
def test_add_job_stage_log_handler():
    """The stage handler writes to /opt/log/<job>/<name>/stage<id>_<model>.log."""
    job_stage_handler = add_job_stage_log_handler('job0', "unit_test", 1, "test_model")
    logger.info("add_job_stage_log_handler")
    logger.info(job_stage_handler.baseFilename)
    assert job_stage_handler.baseFilename == "/opt/log/job0/unit_test/stage1_test_model.log"
    assert os.path.exists("/opt/log/job0/unit_test/stage1_test_model.log")
    # First line came from the first logger.info call above.
    with open("/opt/log/job0/unit_test/stage1_test_model.log") as f:
        assert f.readline().split()[-1] == "add_job_stage_log_handler"
    shutil.rmtree("/opt/log/job0/")
def test_remove_log_handler():
job_handler = add_job_log_handler("unit_test", 1)
remove_log_handler(job_handler)
shutil.rmtree("/opt/log/unit_test") | 1,902 | 39.489362 | 106 | py |
XFL | XFL-master/test/common/utils/test_fed_config_parser.py | from common.utils.fed_conf_parser import FedConfParser
conf = {
"fed_info": {
"scheduler": {
"scheduler": "localhost:55001"
},
"trainer": {
"node-1": "localhost:56001",
"node-2": "localhost:56002"
},
"assist_trainer": {
"assist_trainer": "localhost:55002"
}
},
"redis_server": "localhost:6379",
"grpc": {
"use_tls": False
}
}
def test_fed_config_parser():
res = FedConfParser.parse_dict_conf(conf, 'node-1')
assert res == {'node_id': 'node-1',
'scheduler': {'node_id': 'scheduler', 'host': 'localhost', 'port': '55001', 'use_tls': False},
'trainer': {'assist_trainer': {'node_id': 'assist_trainer', 'host': 'localhost', 'port': '55002', 'use_tls': False}, 'node-1': {'host': 'localhost', 'port': '56001', 'use_tls': False}, 'node-2': {'host': 'localhost', 'port': '56002', 'use_tls': False}},
'redis_server': {'host': 'localhost', 'port': '6379'}}
conf["node_id"] = 'node-2'
res = FedConfParser.parse_dict_conf(conf, 'node-1')
assert res == {'node_id': 'node-2',
'scheduler': {'node_id': 'scheduler', 'host': 'localhost', 'port': '55001', 'use_tls': False},
'trainer': {'assist_trainer': {'node_id': 'assist_trainer', 'host': 'localhost', 'port': '55002', 'use_tls': False}, 'node-1': {'host': 'localhost', 'port': '56001', 'use_tls': False}, 'node-2': {'host': 'localhost', 'port': '56002', 'use_tls': False}},
'redis_server': {'host': 'localhost', 'port': '6379'}}
del conf["grpc"]
res = FedConfParser.parse_dict_conf(conf, 'node-1')
assert res == {'node_id': 'node-2',
'scheduler': {'node_id': 'scheduler', 'host': 'localhost', 'port': '55001', 'use_tls': False},
'trainer': {'assist_trainer': {'node_id': 'assist_trainer', 'host': 'localhost', 'port': '55002', 'use_tls': False}, 'node-1': {'host': 'localhost', 'port': '56001', 'use_tls': False}, 'node-2': {'host': 'localhost', 'port': '56002', 'use_tls': False}},
'redis_server': {'host': 'localhost', 'port': '6379'}} | 2,230 | 50.883721 | 273 | py |
XFL | XFL-master/test/common/utils/test_utils.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import shutil
from pathlib import Path
from common.utils.utils import save_model_config
def test_save_model_config():
if os.path.exists("/opt/checkpoints/unit_test"):
shutil.rmtree("/opt/checkpoints/unit_test/")
p = Path("/opt/checkpoints/unit_test")
save_model_config([{"node-1":{}},{"node-2":{}}], p)
assert os.path.isfile("/opt/checkpoints/unit_test/model_config.json")
save_model_config([{"node-3":{}},{"node-4":{}}], p)
assert os.path.isfile("/opt/checkpoints/unit_test/model_config.json")
with open("/opt/checkpoints/unit_test/model_config.json") as f:
data = json.load(f)
assert len(data) == 4
if os.path.exists("/opt/checkpoints/unit_test"):
shutil.rmtree("/opt/checkpoints/unit_test/")
| 1,397 | 33.95 | 74 | py |
XFL | XFL-master/test/api/test_xfl.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import sys
import client
import scheduler_run
import trainer_run
import xfl
from common.utils.logger import logger
class mocker_version_info():
major = 3
minor = 7
def test_add_args(mocker):
spy_add_mutually_exclusive_group = mocker.spy(argparse.ArgumentParser,'add_mutually_exclusive_group')
mocker.spy(argparse.ArgumentParser,'add_argument')
parser = xfl.add_args(argparse.ArgumentParser(description="XFL - BaseBit Federated Learning"))
spy_add_mutually_exclusive_group.assert_called_once()
assert parser.parse_args(['-s']) == argparse.Namespace(scheduler=True, assist_trainer=False, trainer='trainer', client=None, bar=False, config_path='/opt/config')
assert parser.parse_args(['-a']) == argparse.Namespace(scheduler=False, assist_trainer=True, trainer='trainer', client=None, bar=False, config_path='/opt/config')
assert parser.parse_args(['-t']) == argparse.Namespace(scheduler=False, assist_trainer=False, trainer='trainer', client=None, bar=False, config_path='/opt/config')
assert parser.parse_args(['-c', 'start']) == argparse.Namespace(scheduler=False, assist_trainer=False, trainer='trainer', client='start', bar=False, config_path='/opt/config')
def test_main(mocker):
mocker.patch('argparse.ArgumentParser.parse_args',return_value=argparse.Namespace(
scheduler=True, assist_trainer=False, trainer='trainer', client=None, bar=False, config_path='/opt/config'
))
mocker.patch('scheduler_run.main')
xfl.main()
scheduler_run.main.assert_called_once_with("/opt/config", False)
mocker.patch('argparse.ArgumentParser.parse_args',return_value=argparse.Namespace(
scheduler=False, assist_trainer=False, trainer='trainer', client='start', bar=False, config_path='/opt/config'
))
mocker.patch('client.main')
xfl.main()
client.main.assert_called_once_with("start", "/opt/config")
mocker.patch('argparse.ArgumentParser.parse_args',return_value=argparse.Namespace(
scheduler=False, assist_trainer=True, trainer='trainer', client=None, bar=False, config_path='/opt/config'
))
mocker.patch('trainer_run.main')
xfl.main()
trainer_run.main.assert_called_once_with("assist_trainer", "assist_trainer", config_path='/opt/config')
mocker.patch('argparse.ArgumentParser.parse_args',return_value=argparse.Namespace(
scheduler=False, assist_trainer=False, trainer='trainer', client=None, bar=False, config_path='/opt/config'
))
mocker.patch('trainer_run.main')
xfl.main()
trainer_run.main.assert_called_once_with("trainer", "trainer", config_path='/opt/config')
def test_check_version(mocker):
import collections
mock_version_info = collections.namedtuple('mock_version_info', ['major', 'minor'])
mocker.patch.object(sys, 'version_info', mock_version_info(3,7))
mocker.patch.object(logger, 'error')
mocker.patch('sys.exit')
xfl.check_version()
logger.error.assert_called_once_with("Python Version is not: 3.9" )
sys.exit.assert_called_once_with(-1)
| 3,664 | 42.117647 | 179 | py |
XFL | XFL-master/test/api/test_trainer_run.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import multiprocessing
from unittest.mock import call
import pytest
import trainer_run
from common.communication.gRPC.python import control_pb2, status_pb2
from common.storage.redis.redis_conn import RedisConn
from common.utils.logger import logger
from service.fed_config import FedConfig
from service.fed_job import FedJob
from service.fed_node import FedNode
class mock_model():
def fit(self):
pass
class mock_server():
def start(self):
pass
def wait_for_termination(self):
pass
# @pytest.fixture()
# def start_stub():
# channel = grpc.insecure_channel(
# f"localhost:56001", options=insecure_options)
# stub = trainer_pb2_grpc.TrainerStub(channel)
# return stub
def test_start_trainer_service(mocker):
from unittest.mock import call
mocker.patch.object(trainer_run.grpc, 'server', return_value=mock_server())
mocker.patch.object(FedJob, 'status')
mocker.patch("trainer_run.trainer_pb2_grpc.add_TrainerServicer_to_server")
mocker.patch.object(FedNode, 'add_server')
mocker.patch.object(logger, 'info')
mocker.patch.object(FedNode, 'listening_port', 56001)
trainer_run.start_trainer_service(status_pb2.IDLE)
trainer_run.trainer_pb2_grpc.add_TrainerServicer_to_server.assert_called()
FedNode.add_server.assert_called()
logger.info.assert_has_calls(
[call("Trainer Service Start..."), call("[::]:56001")])
assert FedJob.status == status_pb2.IDLE
@pytest.mark.parametrize('value', [(status_pb2.START_TRAIN),(status_pb2.STOP_TRAIN)])
def test_start_server(mocker,value):
mocker.patch("multiprocessing.Value", return_value=multiprocessing.Value("i", value))
mocker.patch('trainer_run.remove_log_handler', side_effect=RuntimeError())
with pytest.raises(RuntimeError) as e:
trainer_run.start_server()
trainer_run.remove_log_handler.assert_called()
def test_train(mocker):
mocker.patch.object(FedJob, "get_model", return_value=mock_model())
mocker.patch.object(FedConfig, 'get_config')
mocker.patch.object(FedConfig, 'stage_config', {"identity": "node-1", "train_info": {}})
mocker.patch.object(logger, 'info')
status = multiprocessing.Value("i", status_pb2.IDLE)
trainer_run.train(status)
assert status.value == status_pb2.SUCCESSFUL
FedJob.get_model.assert_called_once_with("node-1", {"identity": "node-1", "train_info": {}})
logger.info.assert_has_calls(
[call("node-1 Start Training..."), call("Train Model Successful.")])
def test_job_control(mocker):
mocker.patch.object(FedNode, "create_channel", return_value='55001')
mocker.patch("trainer_run.scheduler_pb2_grpc.SchedulerStub.__init__",
side_effect=lambda x: None)
mocker.patch("trainer_run.scheduler_pb2_grpc.SchedulerStub.control", create=True,
return_value=control_pb2.ControlResponse(code=0, message='test'))
mocker.patch.object(logger, 'info')
trainer_run.job_control(1)
trainer_run.scheduler_pb2_grpc.SchedulerStub.control.assert_called_once_with(
control_pb2.ControlRequest(control=1))
logger.info.assert_called_once_with(
control_pb2.ControlResponse(code=0, message='test'))
def test_main(mocker):
mocker.patch.object(RedisConn, 'init_redis')
mocker.patch.object(FedNode, 'init_fednode')
mocker.patch.object(FedNode, 'config', {})
mocker.patch('trainer_run.Commu')
mocker.patch('trainer_run.start_server')
trainer_run.main('trainer', 'node-1')
RedisConn.init_redis.assert_called_once()
FedNode.init_fednode.assert_called_once_with(
identity='trainer', debug_node_id='node-1', conf_dir='')
trainer_run.Commu.assert_called_once_with({})
trainer_run.start_server.assert_called()
| 4,366 | 36.646552 | 96 | py |
XFL | XFL-master/test/api/test_scheduler_run.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from unittest.mock import call
import scheduler_run
from common.storage.redis.redis_conn import RedisConn
from service.fed_config import FedConfig
from service.fed_job import FedJob
from service.fed_node import FedNode
from common.utils.logger import logger
from common.communication.gRPC.python import status_pb2
def test_start_server(mocker):
mocker.patch.object(FedConfig, 'load_config')
mocker.patch.object(FedNode, 'listening_port', 55001)
mocker.patch.object(FedJob, 'job_id', 1)
mocker.patch.object(FedConfig, 'trainer_config', {0: {'node-1': {}}})
mocker.patch('scheduler_run.trainer_control')
mocker.patch('scheduler_run.get_trainer_status', return_value={'node-1': status_pb2.Status(code=4, status='FAILED')})
mocker.patch.object(FedJob, 'status', status_pb2.TRAINING)
mocker.patch.object(logger, 'warning')
mocker.patch.object(logger, 'info')
mocker.patch.object(RedisConn, 'set')
mocker.patch('scheduler_run.remove_log_handler', side_effect=RuntimeError())
with pytest.raises(RuntimeError) as e:
scheduler_run.start_server('config_path', is_bar=True)
logger.info.assert_called()
scheduler_run.trainer_control.assert_called()
logger.warning.assert_called()
assert FedJob.status == status_pb2.FAILED
scheduler_run.trainer_control.assert_called()
# RedisConn.set.assert_called_once_with("XFL_JOB_STATUS_1", status_pb2.FAILED)
# RedisConn.set.assert_called_with("XFL_JOB_STATUS_1", status_pb2.FAILED)
# calls = [
# call("XFL_JOB_STATUS_1",), call("XFL_JOB_START_TIME_1",), call("XFL_JOB_END_TIME_1",)
# ]
# RedisConn.set.assert_has_calls(calls)
scheduler_run.remove_log_handler.assert_called()
def test_main(mocker):
mocker.patch.object(RedisConn,'init_redis')
mocker.patch.object(FedNode, 'init_fednode')
mocker.patch.object(FedJob, 'init_fedjob')
mocker.patch.object(FedConfig, 'load_algorithm_list')
mocker.patch('scheduler_run.Commu')
mocker.patch.object(FedNode,'config', {'node-1':'test'})
mocker.patch('scheduler_run.start_server')
scheduler_run.main('test', is_bar=True)
RedisConn.init_redis.assert_called_once()
FedNode.init_fednode.assert_called_once()
FedJob.init_fedjob.assert_called_once()
FedConfig.load_algorithm_list.assert_called_once()
scheduler_run.Commu.assert_called_once_with({'node-1':'test'})
scheduler_run.start_server.assert_called_once_with('test', True)
| 3,100 | 35.916667 | 121 | py |
XFL | XFL-master/test/api/test_client.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import pytest
import client
from common.communication.gRPC.python import (control_pb2, scheduler_pb2,
status_pb2)
from common.storage.redis.redis_conn import RedisConn
from service.fed_node import FedNode
def test_start(mocker):
mocker.patch.object(FedNode, "create_channel", return_value='55001')
mocker.patch("client.scheduler_pb2_grpc.SchedulerStub.__init__", side_effect=lambda x:None)
mocker.patch("client.scheduler_pb2_grpc.SchedulerStub.control", create=True, return_value=control_pb2.ControlResponse(jobId=1,code=1, message='test', nodeLogPath={}, stageNodeLogPath={}))
client.start()
client.scheduler_pb2_grpc.SchedulerStub.control.assert_called_once_with(control_pb2.ControlRequest(control = control_pb2.START))
def test_stop(mocker):
mocker.patch.object(FedNode, "create_channel", return_value='55001')
mocker.patch("client.scheduler_pb2_grpc.SchedulerStub.__init__", side_effect=lambda x:None)
mocker.patch("client.scheduler_pb2_grpc.SchedulerStub.control", create=True, return_value=control_pb2.ControlResponse(jobId=1,code=1, message='test'))
client.stop()
client.scheduler_pb2_grpc.SchedulerStub.control.assert_called_once_with(control_pb2.ControlRequest(control = control_pb2.STOP))
def test_status(mocker):
mocker.patch.object(FedNode, "create_channel", return_value='55001')
mocker.patch("client.scheduler_pb2_grpc.SchedulerStub.__init__", side_effect=lambda x:None)
mocker.patch("client.scheduler_pb2_grpc.SchedulerStub.status", create=True, return_value=status_pb2.StatusResponse(jobId=0,schedulerStatus=status_pb2.Status(code=1,status='IDLE'),trainerStatus={"node-1":status_pb2.Status(code=1,status='IDLE')}))
client.status()
client.scheduler_pb2_grpc.SchedulerStub.status.assert_called_once_with(status_pb2.StatusRequest())
def test_algo(mocker):
mocker.patch.object(FedNode, "create_channel", return_value='55001')
mocker.patch("client.scheduler_pb2_grpc.SchedulerStub.__init__", side_effect=lambda x:None)
mocker.patch("client.scheduler_pb2_grpc.SchedulerStub.getAlgorithmList", create=True, return_value=scheduler_pb2.GetAlgorithmListResponse(algorithmList=['test'],defaultConfigMap={"test_map":scheduler_pb2.DefaultConfig(config={"test_k":"test_v"})}))
client.algo()
client.scheduler_pb2_grpc.SchedulerStub.getAlgorithmList.assert_called_once_with(scheduler_pb2.GetAlgorithmListRequest())
def test_stage(mocker):
mocker.patch.object(FedNode, "create_channel", return_value='55001')
mocker.patch("client.scheduler_pb2_grpc.SchedulerStub.__init__", side_effect=lambda x:None)
mocker.patch(
"client.scheduler_pb2_grpc.SchedulerStub.getStage",
create=True,
return_value=scheduler_pb2.GetStageResponse(
code=0,
currentStageId=0,
totalStageNum=1,
currentStageName="0"
)
)
client.stage()
client.scheduler_pb2_grpc.SchedulerStub.getStage.assert_called_once_with(scheduler_pb2.GetStageRequest())
@pytest.mark.parametrize('cmd',
[
('start'),
('stop'),
('status'),
('algo')
])
def test_main(mocker,cmd):
mocker.patch.object(RedisConn, "init_redis")
mocker.patch.object(FedNode, "init_fednode")
mocker.patch.object(client, cmd)
client.main(cmd)
getattr(client,cmd).assert_called()
| 4,168 | 46.375 | 252 | py |
XFL | XFL-master/test/algorithm/core/test_optimizer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from algorithm.core.optimizer.torch_optimizer import get_optimizer
def test_get_optimizer():
optim = get_optimizer('ASGD')
assert issubclass(optim, torch.optim.ASGD)
| 801 | 31.08 | 74 | py |
XFL | XFL-master/test/algorithm/core/test_output.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import pytest
from algorithm.core.output import TableSaver
@pytest.fixture(scope="module", autouse=True)
def env():
if not os.path.exists("/opt/dataset/unit_test"):
os.makedirs("/opt/dataset/unit_test")
yield
if os.path.exists("/opt/dataset/unit_test"):
shutil.rmtree("/opt/dataset/unit_test")
class TestTableSaver():
def test_save(self):
ts = TableSaver('/opt/dataset/unit_test/table.tb')
ts.save(0,{"x0":1.0,"x1":2.0},prefix='unit',suffix="test", append=False)
assert os.path.exists(
"/opt/dataset/unit_test/unit_table_test.tb")
ts.save(1,{"x2":3.0,"x3":4.0},prefix='unit',suffix="test", append=True)
with open("/opt/dataset/unit_test/unit_table_test.tb",'r') as f:
assert f.readlines() == ['epoch,x0,x1\n', '0,1,2\n', '1,3,4\n']
| 1,468 | 33.97619 | 80 | py |
XFL | XFL-master/test/algorithm/core/test_data_io_spark.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# import pytest
# import numpy as np
# import os
# import pyspark.pandas as pd
# import shutil
# from algorithm.core.data_io_spark import ValidationNumpyDataset,CsvReader
# def prepare_data():
# case_df = pd.DataFrame({
# 'x0': np.arange(10),
# 'x1': np.arange(10),
# 'x2': 2 * np.arange(10) - 10,
# })
# case_df['y'] = np.where(
# case_df['x1'] + case_df['x2'] > 10, 1, 0)
# case_df[['y', 'x0', 'x1', 'x2']].to_csv(
# "/opt/dataset/unit_test/test_data_io.csv", index=True
# )
# @pytest.fixture(scope="module", autouse=True)
# def env():
# os.chdir("python")
# if not os.path.exists("/opt/dataset/unit_test"):
# os.makedirs("/opt/dataset/unit_test")
# prepare_data()
# yield
# if os.path.exists("/opt/dataset/unit_test"):
# shutil.rmtree("/opt/dataset/unit_test")
# @pytest.fixture()
# def data():
# yield CsvReader("/opt/dataset/unit_test/test_data_io.csv", has_id=True, has_label=True)
# class TestCsvReader():
# def test_features(data):
# assert data.features() == np.array([np.arange(10),np.arange(10),2 * np.arange(10) - 10])
# assert data.features("dataframe") == pd.DataFrame({
# 'x0': np.arange(10),
# 'x1': np.arange(10),
# 'x2': 2 * np.arange(10) - 10,
# })
# def test_label(data):
# assert data.label() == np.arange(10)
# assert data.label("dataframe").to_numpy().astype(np.float32) == np.arange(10)
# def test_col_names(data):
# assert data.col_names == ['y', 'x0', 'x1', 'x2']
# def test_feature_names():
# assert data.feature_names == ['x0', 'x1', 'x2']
# def test_label_name():
# assert data.label_name == 'y'
# class TestValidationNumpyDataset():
# def test_builtins():
# data = np.arange(10)
# label = np.array([1,1,1,1,1,0,0,0,0,0])
# batch_size = 4
# dataset = ValidationNumpyDataset(data, label, batch_size)
# for i,(x,y) in enumerate(dataset):
# if i == 0:
# assert (x,y) == (np.array([0,1,2,3]),np.array([1,1,1,1]))
# elif i == 1:
# assert (x,y) == (np.array([4,5,6,7]),np.array([1,0,0,0]))
# elif i == 2:
# assert (x,y) == (np.array([8,9]),np.array([0,0]))
| 2,965 | 30.553191 | 98 | py |
XFL | XFL-master/test/algorithm/core/test_core_metrics.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from algorithm.core.metrics import get_metric, ks
def test_get_metric():
y_true=np.array([1,1,0,0])
y_pred=np.array([1,0,1,0])
metric = get_metric('acc')
assert metric(y_true,y_pred) == 0.5
metric = get_metric('auc')
assert metric(y_true,y_pred) == 0.5
metric = get_metric('recall')
assert metric(y_true,y_pred) == 0.5
def test_ks():
y_true=np.array([1,1,0,0])
y_pred=np.array([0.8,0.5,0.1,0.1])
ks_value = ks(y_true, y_pred)
assert ks_value == 1.0
| 1,128 | 28.710526 | 74 | py |
XFL | XFL-master/test/algorithm/core/test_paillier_acceleration.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
from algorithm.core.paillier_acceleration import embed, umbed
from common.crypto.paillier.paillier import Paillier
def almost_equal(a, b, epsilon=1e-5):
return np.all(np.abs(a - b) < epsilon)
p0 = np.random.random((1000,)) - 0.5
p1 = (np.random.random((1000,)) - 0.2) * 100
@pytest.mark.parametrize("p0, p1", [(p0, p1)])
def test_paillier_acceleration(p0, p1):
context = Paillier.context(2048, True)
embeded_p = embed([p0, p1])
c = Paillier.encrypt(context, embeded_p)
sum_c = np.sum(c)
pa = Paillier.decrypt(context, sum_c, out_origin=True)
p2, p3 = umbed([pa], 2)
assert almost_equal(np.sum(p0), p2)
assert almost_equal(np.sum(p1), p3)
| 1,325 | 30.571429 | 74 | py |
XFL | XFL-master/test/algorithm/core/test_data_io.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import pytest
from pathlib import Path
from transformers import AutoTokenizer
from algorithm.core.data_io import QADataset
# @pytest.fixture(scope="module", autouse=True)
# def env(tmp_path):
def test_QADataset(tmp_path):
data1 = {
"id": "seed_task_0",
"name": "藏头诗",
"instruction": "根据提示词,写一首藏头诗。",
"instances": [
{"input": "公携人地水风日长", "output": "公子申敬爱,携朋玩物华。人是平阳客,地即石崇家。水文生旧浦,风色满新花。日暮连归骑,长川照晚霞。"},
{"input": "高下绮文蓂柳日连", "output": "高门引冠盖,下客抱支离。绮席珍羞满,文场翰藻摛。蓂华雕上月,柳色蔼春池。日斜归戚里,连骑勒金羁。"}
]
}
data2 = {
"id": "seed_task_0",
"name": "藏头诗",
"instruction": "根据提示词,写一首藏头诗。",
"instances": [
{"input": "砌津既方绮琼云丝", "output": "砌蓂收晦魄,津柳竞年华。既狎忘筌友,方淹投辖车。绮筵回舞雪,琼醑泛流霞。云低上天晚,丝雨带风斜。"},
{"input": "春俯置开林竹兴山", "output": "春华归柳树,俯景落蓂枝。置驿铜街右,开筵玉浦陲。林烟含障密,竹雨带珠危。兴阑巾倒戴,山公下习池。"}
]
}
f1_path = tmp_path / "1.json"
with open(f1_path, "w") as f1:
json.dump(data1, f1)
f2_path = tmp_path / "2.json"
with open(f2_path, "w") as f2:
json.dump(data2, f2)
model_name_or_path = Path(__file__).parent / 'tokenizer'
print(model_name_or_path)
tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True) # Callback
dataset = QADataset(
file_name_or_path=tmp_path,
tokenizer=tokenizer,
max_src_length=200,
max_dst_length=500,
prompt_pattern="{}:\n问:{}\n答:",
key_query='input',
key_answer='output'
)
assert len(dataset.data) == 4
print(dataset[1])
| 2,276 | 29.36 | 101 | py |
XFL | XFL-master/test/algorithm/core/horizontal/template/torch/test_h_aggregation_torch.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import shutil
import pickle
import numpy as np
import pandas as pd
import pytest
from service.fed_config import FedConfig
from service.fed_node import FedNode
from algorithm.framework.horizontal.logistic_regression.assist_trainer import HorizontalLogisticRegressionAssistTrainer
from algorithm.framework.horizontal.logistic_regression.label_trainer import HorizontalLogisticRegressionLabelTrainer
from algorithm.core.horizontal.aggregation.aggregation_plain import AggregationPlainRoot, AggregationPlainLeaf
from common.communication.gRPC.python.channel import DualChannel
from common.communication.gRPC.python.commu import Commu
MOV = b"@" # middle of value
EOV = b"&" # end of value
def prepare_data():
case_df = pd.DataFrame({
'x0': np.random.random(1000),
'x1': [0] * 1000,
'x2': 2 * np.random.random(1000) + 1.0,
'x3': 3 * np.random.random(1000) - 1.0,
'x4': np.random.random(1000)
})
case_df['y'] = np.where(case_df['x0'] + case_df['x2'] + case_df['x3'] > 2.5, 1, 0)
case_df = case_df[['y', 'x0', 'x1', 'x2', 'x3', 'x4']]
case_df.head(800).to_csv(
"/opt/dataset/unit_test/train_data.csv", index=True
)
case_df.tail(200).to_csv(
"/opt/dataset/unit_test/test_data.csv", index=True
)
@pytest.fixture()
def get_assist_trainer_conf():
with open("python/algorithm/config/horizontal_logistic_regression/assist_trainer.json") as f:
conf = json.load(f)
conf["input"]["valset"][0]["path"] = "/opt/dataset/unit_test"
conf["input"]["valset"][0]["name"] = "test_data.csv"
conf["output"]["path"] = "/opt/checkpoints/unit_test"
yield conf
@pytest.fixture()
def get_trainer_conf():
with open("python/algorithm/config/horizontal_logistic_regression/trainer.json") as f:
conf = json.load(f)
conf["input"]["trainset"][0]["path"] = "/opt/dataset/unit_test"
conf["input"]["trainset"][0]["name"] = "train_data.csv"
conf["output"]["path"] = "/opt/checkpoints/unit_test"
yield conf
@pytest.fixture(scope="module", autouse=True)
def env():
if not os.path.exists("/opt/dataset/unit_test"):
os.makedirs("/opt/dataset/unit_test")
if not os.path.exists("/opt/checkpoints/unit_test"):
os.makedirs("/opt/checkpoints/unit_test")
prepare_data()
yield
if os.path.exists("/opt/dataset/unit_test"):
shutil.rmtree("/opt/dataset/unit_test")
if os.path.exists("/opt/checkpoints/unit_test"):
shutil.rmtree("/opt/checkpoints/unit_test")
class TestAggregation:
@pytest.mark.parametrize("aggregation_method", ["fedavg", "fedprox", "scaffold"])
def test_trainer(self, get_trainer_conf, get_assist_trainer_conf, aggregation_method, mocker):
        """Run label-trainer and assist-trainer fit() end-to-end with mocked comms.

        All gRPC channels are patched out: DualChannel.send/recv return canned
        payloads and root aggregation is short-circuited to a precomputed
        result, so fit() exercises only the local training loop.

        Args:
            get_trainer_conf: fixture yielding the label-trainer config dict.
            get_assist_trainer_conf: fixture yielding the assist-trainer config dict.
            aggregation_method: parametrized name; "fedprox"/"scaffold",
                anything else falls through to "fedavg".
            mocker: pytest-mock fixture.
        """
        fed_method = None
        fed_assist_method = None
        # Pretend this process is the assist_trainer of a 2-trainer federation.
        mocker.patch.object(Commu, "node_id", "assist_trainer")
        Commu.trainer_ids = ['node-1', 'node-2']
        Commu.scheduler_id = 'assist_trainer'
        conf = get_trainer_conf
        assist_conf = get_assist_trainer_conf
        assist_conf["model_info"]["config"]["input_dim"] = 5
        mocker.patch.object(
            FedConfig, "get_label_trainer", return_value=['node-1', 'node-2']
        )
        mocker.patch.object(
            FedConfig, "get_assist_trainer", return_value='assist_trainer'
        )
        mocker.patch.object(FedConfig, "node_id", 'node-1')
        mocker.patch.object(FedNode, "node_id", "node-1")
        # Plain (unencrypted) aggregation keeps the test deterministic and fast.
        assist_conf["train_info"]["train_params"]["encryption"] = {"plain": {}}
        sec_conf = assist_conf["train_info"]["train_params"]["encryption"]["plain"]
        fed_method = AggregationPlainLeaf(sec_conf)
        fed_assist_method = AggregationPlainRoot(sec_conf)
        if aggregation_method == "fedprox":
            assist_conf["train_info"]["train_params"]["aggregation"] = {
                "method": {"fedprox": {"mu": 0.01}}
            }
        elif aggregation_method == "scaffold":
            assist_conf["train_info"]["train_params"]["aggregation"] = {
                "method": {"scaffold": {}}
            }
        else:
            assist_conf["train_info"]["train_params"]["aggregation"] = {
                "method": {"fedavg": {}}
            }
        mocker.patch.object(
            DualChannel, "__init__", return_value=None
        )
        mocker.patch.object(
            DualChannel, "send", return_value=None
        )
        # During construction, recv hands back the assist trainer's config.
        mocker.patch.object(
            DualChannel, "recv",
            return_value = {
                "model_info":assist_conf["model_info"], "train_info": assist_conf["train_info"]
            }
        )
        lrt = HorizontalLogisticRegressionLabelTrainer(conf)
        lrt_a = HorizontalLogisticRegressionAssistTrainer(assist_conf)
        # Serialized payloads the patched channel will hand back during fit():
        # an early-stop flag and the assist trainer's plain model parameters.
        esflag_recv = pickle.dumps(False) + EOV
        params_plain_recv = pickle.dumps(lrt_a.model.state_dict()) + EOV
        params_send = fed_method._calc_upload_value(lrt.model.state_dict(), len(lrt.train_dataloader.dataset))
        params_collect = pickle.dumps(params_send)
        agg_otp = fed_assist_method._calc_aggregated_params(list(map(lambda x: pickle.loads(x), [params_collect,params_collect])))
        def mock_recv(*args, **kwargs):
            # Alternate: odd calls -> early-stop flag, even calls -> parameters.
            if recv_mocker.call_count % 2 == 1:
                return esflag_recv
            else:
                return params_plain_recv
        def mock_agg(*args, **kwargs):
            return agg_otp
        recv_mocker = mocker.patch.object(
            DualChannel, "recv", side_effect=mock_recv
        )
        mocker.patch.object(
            AggregationPlainRoot, "aggregate", side_effect=mock_agg
        )
        mocker.patch("service.fed_control._send_progress")
        lrt.fit()
        lrt_a.fit()
| 6,396 | 38.245399 | 130 | py |
XFL | XFL-master/test/algorithm/core/tokenizer/tokenization_chatglm.py | """Tokenization classes for ChatGLM."""
from typing import List, Optional, Union
import os
from transformers.tokenization_utils import PreTrainedTokenizer
from transformers.utils import logging, PaddingStrategy
from transformers.tokenization_utils_base import EncodedInput, BatchEncoding
from typing import Dict
import sentencepiece as spm
import numpy as np
logger = logging.get_logger(__name__)
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"THUDM/chatglm-6b": 2048,
}
class TextTokenizer:
    """Thin convenience wrapper around a SentencePiece processor.

    Exposes encode/decode between text, token pieces and token ids, plus
    the vocabulary size via ``len()``.
    """
    def __init__(self, model_path):
        processor = spm.SentencePieceProcessor()
        processor.Load(model_path)
        self.sp = processor
        self.num_tokens = self.sp.vocab_size()
    def encode(self, text):
        # Text -> list of token ids.
        return self.sp.EncodeAsIds(text)
    def decode(self, ids: List[int]):
        # Token ids -> text.
        return self.sp.DecodeIds(ids)
    def tokenize(self, text):
        # Text -> list of token pieces (strings).
        return self.sp.EncodeAsPieces(text)
    def convert_tokens_to_ids(self, tokens):
        return list(map(self.sp.PieceToId, tokens))
    def convert_token_to_id(self, token):
        return self.sp.PieceToId(token)
    def convert_id_to_token(self, idx):
        return self.sp.IdToPiece(idx)
    def __len__(self):
        return self.num_tokens
class SPTokenizer:
    """SentencePiece tokenizer that reserves the first ``num_image_tokens``
    ids for image tokens and shifts all text ids above that range.

    Before encoding, newlines become ``<n>`` and tab / whitespace runs are
    mapped to dedicated ``<|tab|>`` / ``<|blank_n|>`` tokens; decoding
    reverses those substitutions.
    """
    def __init__(
        self,
        vocab_file,
        num_image_tokens=20000,
        max_blank_length=80,
        byte_fallback=True,
    ):
        assert vocab_file is not None
        self.vocab_file = vocab_file
        self.num_image_tokens = num_image_tokens
        self.special_tokens = ["[MASK]", "[gMASK]", "[sMASK]", "<unused_0>", "<sop>", "<eop>", "<ENC>", "<dBLOCK>"]
        self.max_blank_length = max_blank_length
        self.byte_fallback = byte_fallback
        self.text_tokenizer = TextTokenizer(vocab_file)
    def _get_text_tokenizer(self):
        return self.text_tokenizer
    @staticmethod
    def get_blank_token(length: int):
        # Token standing for a run of `length` spaces (length >= 2).
        assert length >= 2
        return f"<|blank_{length}|>"
    @staticmethod
    def get_tab_token():
        return f"<|tab|>"
    @property
    def num_text_tokens(self):
        return self.text_tokenizer.num_tokens
    @property
    def num_tokens(self):
        # Total vocabulary size: reserved image ids + text ids.
        return self.num_image_tokens + self.num_text_tokens
    @staticmethod
    def _encode_whitespaces(text: str, max_len: int = 80):
        # Replace tabs first, then space runs from longest to shortest so a
        # long run is not broken up into several shorter blank tokens.
        text = text.replace("\t", SPTokenizer.get_tab_token())
        for i in range(max_len, 1, -1):
            text = text.replace(" " * i, SPTokenizer.get_blank_token(i))
        return text
    def _preprocess(self, text: str, linebreak=True, whitespaces=True):
        if linebreak:
            text = text.replace("\n", "<n>")
        if whitespaces:
            text = self._encode_whitespaces(text, max_len=self.max_blank_length)
        return text
    def encode(
        self, text: str, linebreak=True, whitespaces=True, add_dummy_prefix=True
    ) -> List[int]:
        """
        @param text: Text to encode.
        @param linebreak: Whether to encode newline (\n) in text.
        @param whitespaces: Whether to encode multiple whitespaces or tab in text, useful for source code encoding.
        @param special_tokens: Whether to encode special token ([MASK], [gMASK], etc.) in text.
        @param add_dummy_prefix: Whether to add dummy blank space in the beginning.
        """
        text = self._preprocess(text, linebreak, whitespaces)
        if not add_dummy_prefix:
            text = "<n>" + text
        tmp = self._get_text_tokenizer().encode(text)
        # Shift text ids past the reserved image-token range.
        tokens = [x + self.num_image_tokens for x in tmp]
        return tokens if add_dummy_prefix else tokens[2:]
    def decode(self, text_ids: List[int]) -> str:
        # Undo the image-token offset; drop ids that fall in the image range.
        ids = [int(_id) - self.num_image_tokens for _id in text_ids]
        ids = [_id for _id in ids if _id >= 0]
        text = self._get_text_tokenizer().decode(ids)
        # Restore newlines, tabs and whitespace runs.
        text = text.replace("<n>", "\n")
        text = text.replace(SPTokenizer.get_tab_token(), "\t")
        for i in range(2, self.max_blank_length + 1):
            text = text.replace(self.get_blank_token(i), " " * i)
        return text
    def tokenize(
        self, text: str, linebreak=True, whitespaces=True, add_dummy_prefix=True
    ) -> List[str]:
        """
        @param text: Text to encode.
        @param linebreak: Whether to encode newline (\n) in text.
        @param whitespaces: Whether to encode multiple whitespaces or tab in text, useful for source code encoding.
        @param special_tokens: Whether to encode special token ([MASK], [gMASK], etc.) in text.
        @param add_dummy_prefix: Whether to add dummy blank space in the beginning.
        """
        text = self._preprocess(text, linebreak, whitespaces)
        if not add_dummy_prefix:
            text = "<n>" + text
        tokens = self._get_text_tokenizer().tokenize(text)
        return tokens if add_dummy_prefix else tokens[2:]
    def __getitem__(self, x: Union[int, str]):
        # int -> token string; str -> token id. Image tokens render/parse as
        # "<image_N>".
        if isinstance(x, int):
            if x < self.num_image_tokens:
                return "<image_{}>".format(x)
            else:
                return self.text_tokenizer.convert_id_to_token(x - self.num_image_tokens)
        elif isinstance(x, str):
            if x.startswith("<image_") and x.endswith(">") and x[7:-1].isdigit():
                return int(x[7:-1])
            else:
                return self.text_tokenizer.convert_token_to_id(x) + self.num_image_tokens
        else:
            raise ValueError("The key should be str or int.")
class ChatGLMTokenizer(PreTrainedTokenizer):
    """
    Construct a ChatGLM tokenizer. Based on byte-level Byte-Pair-Encoding.
    Args:
        vocab_file (`str`):
            Path to the vocabulary file.
    """
    vocab_files_names = {"vocab_file": "ice_text.model"}
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask", "position_ids"]
    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=False,
        bos_token='<sop>',
        eos_token='<eop>',
        end_token='</s>',
        mask_token='[MASK]',
        gmask_token='[gMASK]',
        padding_side="left",
        pad_token="<pad>",
        unk_token="<unk>",
        num_image_tokens=20000,
        **kwargs
    ) -> None:
        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            padding_side=padding_side,
            bos_token=bos_token,
            eos_token=eos_token,
            end_token=end_token,
            mask_token=mask_token,
            gmask_token=gmask_token,
            pad_token=pad_token,
            unk_token=unk_token,
            num_image_tokens=num_image_tokens,
            **kwargs
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.vocab_file = vocab_file
        self.bos_token = bos_token
        self.eos_token = eos_token
        self.end_token = end_token
        self.mask_token = mask_token
        self.gmask_token = gmask_token
        # Underlying SentencePiece tokenizer with the image-token id offset.
        self.sp_tokenizer = SPTokenizer(vocab_file, num_image_tokens=num_image_tokens)
        """ Initialisation """
    @property
    def gmask_token_id(self) -> Optional[int]:
        # Id of the gMASK token, or None if the token is unset.
        if self.gmask_token is None:
            return None
        return self.convert_tokens_to_ids(self.gmask_token)
    @property
    def end_token_id(self) -> Optional[int]:
        """
        `Optional[int]`: Id of the end of context token in the vocabulary. Returns `None` if the token has not been
        set.
        """
        if self.end_token is None:
            return None
        return self.convert_tokens_to_ids(self.end_token)
    @property
    def vocab_size(self):
        """ Returns vocab size """
        return self.sp_tokenizer.num_tokens
    def get_vocab(self):
        """ Returns vocab as a dict """
        vocab = {self._convert_id_to_token(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def preprocess_text(self, inputs):
        # Optional whitespace collapsing / lower-casing per constructor flags.
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize(self, text, **kwargs):
        """ Returns a tokenized string. """
        text = self.preprocess_text(text)
        seq = self.sp_tokenizer.tokenize(text)
        return seq
    def _decode(
        self,
        token_ids: Union[int, List[int]],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = True,
        **kwargs
    ) -> str:
        # Accept a single id as well as a list; strip padding before decoding.
        if isinstance(token_ids, int):
            token_ids = [token_ids]
        if len(token_ids) == 0:
            return ""
        if self.pad_token_id in token_ids: # remove pad
            token_ids = list(filter((self.pad_token_id).__ne__, token_ids))
        return self.sp_tokenizer.decode(token_ids)
    def _convert_token_to_id(self, token):
        """ Converts a token (str) in an id using the vocab. """
        return self.sp_tokenizer[token]
    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.sp_tokenizer[index]
    def save_vocabulary(self, save_directory, filename_prefix=None):
        """
        Save the vocabulary and special tokens file to a directory.
        Args:
            save_directory (`str`):
                The directory in which to save the vocabulary.
            filename_prefix (`str`, *optional*):
                An optional prefix to add to the named of the saved files.
        Returns:
            `Tuple(str)`: Paths to the files saved.
        """
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, self.vocab_files_names["vocab_file"]
            )
        else:
            vocab_file = save_directory
        # Copy the binary SentencePiece model file verbatim.
        with open(self.vocab_file, 'rb') as fin:
            proto_str = fin.read()
        with open(vocab_file, "wb") as writer:
            writer.write(proto_str)
        return (vocab_file,)
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
        adding special tokens. A BERT sequence has the following format:
        - single sequence: `[CLS] X [SEP]`
        - pair of sequences: `[CLS] A [SEP] B [SEP]`
        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
        Returns:
            `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        """
        gmask_id = self.sp_tokenizer[self.gmask_token]
        eos_id = self.sp_tokenizer[self.eos_token]
        # Actual ChatGLM layout: tokens_0 [gMASK] <sop> (+ tokens_1 <eop> for pairs).
        token_ids_0 = token_ids_0 + [gmask_id, self.sp_tokenizer[self.bos_token]]
        if token_ids_1 is not None:
            token_ids_0 = token_ids_0 + token_ids_1 + [eos_id]
        return token_ids_0
    def _pad(
            self,
            encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
            max_length: Optional[int] = None,
            padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
            pad_to_multiple_of: Optional[int] = None,
            return_attention_mask: Optional[bool] = None,
    ) -> dict:
        """
        Pad encoded inputs (on left/right and up to predefined length or max length in the batch)
        Args:
            encoded_inputs:
                Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).
            max_length: maximum length of the returned list and optionally padding length (see below).
                Will truncate by taking into account the special tokens.
            padding_strategy: PaddingStrategy to use for padding.
                - PaddingStrategy.LONGEST Pad to the longest sequence in the batch
                - PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
                - PaddingStrategy.DO_NOT_PAD: Do not pad
                The tokenizer padding sides are defined in self.padding_side:
                    - 'left': pads on the left of the sequences
                    - 'right': pads on the right of the sequences
            pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
                This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
                `>= 7.5` (Volta).
            return_attention_mask:
                (optional) Set to False to avoid returning attention mask (default: set to model specifics)
        """
        # Load from model defaults
        bos_token_id = self.sp_tokenizer[self.bos_token]
        mask_token_id = self.sp_tokenizer[self.mask_token]
        gmask_token_id = self.sp_tokenizer[self.gmask_token]
        # Only left padding is supported (enforced below).
        assert self.padding_side == "left"
        required_input = encoded_inputs[self.model_input_names[0]]
        seq_length = len(required_input)
        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length
        # Initialize attention mask if not present.
        if max_length is not None:
            if "attention_mask" not in encoded_inputs:
                # Positions before <sop> (the "context") may attend to each
                # other freely; the rest is lower-triangular (causal).  The
                # final boolean mask marks *masked-out* positions with True.
                if bos_token_id in required_input:
                    context_length = required_input.index(bos_token_id)
                else:
                    context_length = seq_length
                attention_mask = np.ones((1, seq_length, seq_length))
                attention_mask = np.tril(attention_mask)
                attention_mask[:, :, :context_length] = 1
                attention_mask = np.bool_(attention_mask < 0.5)
                encoded_inputs["attention_mask"] = attention_mask
            if "position_ids" not in encoded_inputs:
                if bos_token_id in required_input:
                    context_length = required_input.index(bos_token_id)
                else:
                    context_length = seq_length
                # 2-row position ids: absolute positions (frozen at the mask
                # position after the context) and block positions counting up
                # from the start of the generated span.
                position_ids = np.arange(seq_length, dtype=np.int64)
                mask_token = mask_token_id if mask_token_id in required_input else gmask_token_id
                if mask_token in required_input:
                    mask_position = required_input.index(mask_token)
                    position_ids[context_length:] = mask_position
                block_position_ids = np.concatenate(
                    [np.zeros(context_length, dtype=np.int64),
                     np.arange(1, seq_length - context_length + 1, dtype=np.int64)])
                encoded_inputs["position_ids"] = np.stack([position_ids, block_position_ids], axis=0)
        if needs_to_be_padded:
            difference = max_length - len(required_input)
            # Left-pad every tensor; padded attention positions are True (masked).
            if "attention_mask" in encoded_inputs:
                encoded_inputs["attention_mask"] = np.pad(encoded_inputs["attention_mask"],
                                                          pad_width=[(0, 0), (difference, 0), (difference, 0)],
                                                          mode='constant', constant_values=True)
            if "token_type_ids" in encoded_inputs:
                encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[
                    "token_type_ids"
                ]
            if "special_tokens_mask" in encoded_inputs:
                encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"]
            if "position_ids" in encoded_inputs:
                encoded_inputs["position_ids"] = np.pad(encoded_inputs["position_ids"],
                                                        pad_width=[(0, 0), (difference, 0)])
            encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input
        return encoded_inputs
| 16,652 | 37.638051 | 119 | py |
XFL | XFL-master/test/algorithm/core/tree/test_pack_index.py |
from algorithm.core.tree.pack_index import pack_index, unpack_index
def test_pack_index():
    """Packing then unpacking an index list yields the sorted indices."""
    indices = [3, 6, 1]
    packed = pack_index(indices)
    unpacked = unpack_index(packed)
    assert unpacked == sorted(indices)
XFL | XFL-master/test/algorithm/core/tree/test_cat_param_parser.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
import pytest
from algorithm.core.tree.cat_param_parser import parse_category_param
def test_parse_category_param():
    """Exercise parse_category_param across index/name/max-unique-value
    selectors and their inclusive/exclusive and union/intersection modes.

    The 7x10 frame uses columns 'a'..'j'; column index 9 ('j') is constant 0
    and index 6 ('g') is constant 1, so those two have a single unique value.
    """
    df = pd.DataFrame(np.arange(70).reshape(7, 10),
                      columns=list(map(chr, range(97, 107))))
    df.iloc[:, 9] = 0
    df.iloc[:, 6] = 1
    columns_name = df.columns.to_list()
    # Inclusive index spec "0, 3:5" selects columns 0, 3 and 4.
    res = parse_category_param(df,
                               col_index="0, 3:5",
                               col_index_type='inclusive')
    assert res == [0, 3, 4]
    # Negative indices are resolved against the column count.
    res = parse_category_param(df,
                               col_index="-7, -5:-3",
                               col_index_type='inclusive')
    assert res == [3, 5, 6]
    # An out-of-range negative index is rejected.
    with pytest.raises(ValueError):
        res = parse_category_param(df,
                                   col_index="-11, -5:-3",
                                   col_index_type='inclusive')
    # Exclusive index spec selects the complement.
    res = parse_category_param(df,
                               col_index="0, 3:5",
                               col_index_type='exclusive')
    assert res == list(set(range(10)) - set({0, 3, 4}))
    # Selection by column name, inclusive and exclusive.
    res = parse_category_param(df,
                               col_names=["g", "a"],
                               col_index_type='inclusive')
    res_name = [columns_name[i] for i in res]
    assert set(res_name) == set(["g", "a"])
    res = parse_category_param(df,
                               col_names=["g", "a"],
                               col_names_type='exclusive')
    res_name = [columns_name[i] for i in res]
    assert set(res_name) == (set(list(map(chr, range(97, 107)))) - set(["g", "a"]))
    # max_num_value alone: only the constant columns (6 and 9) qualify.
    res = parse_category_param(df,
                               max_num_value=6,
                               max_num_value_type="intersection")
    res_name = [columns_name[i] for i in res]
    assert set(res_name) == set()
    res = parse_category_param(df,
                               max_num_value=6,
                               max_num_value_type="union")
    assert set(res) == set({6, 9})
    # Combinations of all three selectors.
    res = parse_category_param(df,
                               col_index="0, 3:5",
                               col_names=["g", "e"],
                               max_num_value=6,
                               col_index_type='inclusive',
                               col_names_type='inclusive',
                               max_num_value_type="union")
    assert set(res) == set({0, 3, 4, 6, 9})
    res = parse_category_param(df,
                               col_index="0, 3:5",
                               col_names=["g", "e"],
                               max_num_value=6,
                               col_index_type='inclusive',
                               col_names_type='inclusive',
                               max_num_value_type="intersection")
    assert set(res) == set({6})
    res = parse_category_param(df,
                               col_index="0, 3:5",
                               col_names=["g", "e"],
                               max_num_value=6,
                               col_index_type='exclusive',
                               col_names_type='inclusive',
                               max_num_value_type="union")
    assert set(res) == set({1, 2, 4, 5, 6, 7, 8, 9})
    res = parse_category_param(df,
                               col_index="0, 3:5",
                               col_names=["g", "e"],
                               max_num_value=6,
                               col_index_type='inclusive',
                               col_names_type='exclusive',
                               max_num_value_type="union")
    assert set(res) == set(range(10))
    res = parse_category_param(df,
                               col_index="0, 3:5",
                               col_names=["g", "e"],
                               max_num_value=6,
                               col_index_type='inclusive',
                               col_names_type='exclusive',
                               max_num_value_type="intersection")
    assert set(res) == set({9})
    res = parse_category_param(df,
                               col_index="-4, -3:5",
                               col_names=["g", "e"],
                               max_num_value=6,
                               col_index_type='inclusive',
                               col_names_type='inclusive',
                               max_num_value_type="intersection")
    assert set(res) == set({6})
| 5,134 | 39.753968 | 83 | py |
XFL | XFL-master/test/algorithm/core/tree/test_tree_structure.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Type
import pytest
from algorithm.core.tree.tree_structure import BoostingTree, Node, NodeDict, SplitInfo, Tree
def test_node():
    """A Node can be promoted to a non-leaf and then back to a leaf."""
    node = Node(id="1111")
    node.update_as_non_leaf(split_info=None, left_node_id="12", right_node_id="23")
    # Demoting back to a leaf only needs a weight.
    node.update_as_leaf(weight=1.2)
def test_tree():
    """Cover Tree construction, splitting, node search and weight setting."""
    # A root_node_id absent from `nodes` is rejected at construction.
    with pytest.raises(ValueError):
        Tree("1111", root_node_id="12", nodes={}, tree_index=0)
    # A fresh tree starts with exactly one node: the root.
    tree = Tree(party_id='node-1', tree_index=0)
    assert tree.party_id == 'node-1'
    assert len(list(tree.nodes.keys())) == 1
    assert list(tree.nodes.keys())[0] == tree.root_node_id
    assert tree.root_node.id == tree.root_node_id
    split_info = SplitInfo(
        owner_id='node-2',
        feature_idx=3,
        split_point=3.5,
        gain=5
    )
    # Splitting the root creates linked left/right children at depth 1.
    node_id_1, node_id_2 = tree.split(node_id=tree.root_node_id,
                                      split_info=split_info,
                                      left_sample_index=[1, 3],
                                      right_sample_index=[0, 2],
                                      left_sample_weight=1.0,
                                      right_sample_weight=1.5)
    assert tree.check_node(node_id_1)
    assert tree.check_node(node_id_2)
    node_1 = tree.nodes[node_id_1]
    node_2 = tree.nodes[node_id_2]
    assert node_1.parent_node_id == tree.root_node_id
    assert node_2.parent_node_id == tree.root_node_id
    assert tree.root_node.left_node_id == node_id_1
    assert tree.root_node.right_node_id == node_id_2
    assert node_1.linkage == "left"
    assert node_2.linkage == "right"
    node_list = tree.search_nodes(depth=1)
    assert len(node_list) == 2
    assert node_1 in node_list
    assert node_2 in node_list
    # Splitting a nonexistent node is a no-op returning (None, None).
    node_id_1, node_id_2 = tree.split(node_id="11111",
                                      split_info=split_info,
                                      left_sample_index=[1, 3],
                                      right_sample_index=[0, 2],
                                      left_sample_weight=1.0,
                                      right_sample_weight=1.5)
    assert node_id_1 is None and node_id_2 is None
    Tree("1111", root_node_id=tree.root_node_id, nodes=tree.nodes, tree_index=0)
    # clear_training_info drops the per-node sample indices.
    tree.clear_training_info()
    for _, node in tree.nodes.items():
        assert node.sample_index is None
    tree.set_weight(node_id=tree.root_node_id, weight=-1)
    # Setting a weight on an unknown node id raises KeyError.
    with pytest.raises(KeyError):
        tree.set_weight(node_id="aaa", weight=2)
def test_boosting_tree():
    """Cover BoostingTree validation, (de)serialization, slicing, append and
    the NodeDict container."""
    # lr and max_depth must be lists of matching length with a trees list.
    with pytest.raises(TypeError):
        BoostingTree(lr=3, max_depth=[1])
    with pytest.raises(TypeError):
        BoostingTree(lr=[3, 4], max_depth=3)
    with pytest.raises(TypeError):
        BoostingTree(lr=[0.3, 0.3], max_depth=[3, 3], trees=None)
    with pytest.raises(ValueError):
        BoostingTree(lr=[0.1, 0.1], max_depth=[3, 3], trees=[None])
    with pytest.raises(ValueError):
        BoostingTree(lr=[0.1], max_depth=[3, 3], trees=[None])
    # Serialized form of a 2-tree boosting model: tree 0 has depth 2 with two
    # owned splits and one remote ("B"-owned) split, tree 1 is a single split.
    trees = {
        "lr": [0.3, 0.3],
        "max_depth": [5, 5],
        "suggest_threshold": 0.5,
        "num_trees": 2,
        "loss_method": "BCEWithLogitsLoss",
        "version": 1.0,
        "trees": [
            {
                "party_id": "A",
                "tree_index": 0,
                "root_node_id": "11111",
                "nodes": {
                    "11111": {
                        "id": "11111",
                        "depth": 0,
                        "left_node_id": "22222",
                        "right_node_id": "33333",
                        "is_leaf": False,
                        "weight": None,
                        "linkage": None,
                        "split_info": {
                            "owner_id": "A",
                            "feature_name": None,
                            "feature_idx": 5,
                            "is_category": False,
                            "split_point": 1.23,
                            "left_cat": [],
                        }
                    },
                    "22222": {
                        "id": "22222",
                        "depth": 1,
                        "left_node_id": "44444",
                        "right_node_id": "55555",
                        "is_leaf": False,
                        "weight": None,
                        "linkage": 'left',
                        "split_info": {
                            "owner_id": "A",
                            "feature_name": None,
                            "feature_idx": 1,
                            "is_category": True,
                            "split_point": None,
                            "left_cat": [3, 5],
                        }
                    },
                    "33333": {
                        "id": "33333",
                        "depth": 1,
                        "left_node_id": "66666",
                        "right_node_id": "77777",
                        "is_leaf": False,
                        "weight": None,
                        "linkage": 'right',
                        "split_info": {
                            "owner_id": "B",
                            "feature_name": None,
                            "feature_idx": None,
                            "is_category": None,
                            "split_point": None,
                            "left_cat": None,
                        }
                    },
                    "44444": {
                        "id": "44444",
                        "depth": 2,
                        "left_node_id": None,
                        "right_node_id": None,
                        "is_leaf": True,
                        "weight": 0,
                        "linkage": 'left',
                        "split_info": None
                    },
                    "55555": {
                        "id": "55555",
                        "depth": 2,
                        "left_node_id": None,
                        "right_node_id": None,
                        "is_leaf": True,
                        "weight": 3.8,
                        "linkage": 'right',
                        "split_info": None
                    },
                    "66666": {
                        "id": "66666",
                        "depth": 2,
                        "left_node_id": None,
                        "right_node_id": None,
                        "is_leaf": True,
                        "weight": -1.2,
                        "linkage": 'left',
                        "split_info": None
                    },
                    "77777": {
                        "id": "77777",
                        "depth": 2,
                        "left_node_id": None,
                        "right_node_id": None,
                        "is_leaf": True,
                        "weight": 1.4,
                        "linkage": 'right',
                        "split_info": None
                    },
                }
            },
            {
                "party_id": "A",
                "tree_index": 1,
                "root_node_id": "a1111",
                "nodes": {
                    "a1111": {
                        "id": "a1111",
                        "depth": 0,
                        "left_node_id": "a2222",
                        "right_node_id": "a3333",
                        "is_leaf": False,
                        "weight": None,
                        "linkage": None,
                        "split_info": {
                            "owner_id": "B",
                            "feature_name": None,
                            "feature_idx": 7,
                            "is_category": True,
                            "split_point": None,
                            "left_cat": [2, 4, 8],
                        }
                    },
                    "a2222": {
                        "id": "a2222",
                        "depth": 1,
                        "left_node_id": None,
                        "right_node_id": None,
                        "is_leaf": True,
                        "weight": 12.52,
                        "linkage": 'left',
                        "split_info": None
                    },
                    "a3333": {
                        "id": "a3333",
                        "depth": 1,
                        "left_node_id": None,
                        "right_node_id": None,
                        "is_leaf": True,
                        "weight": -12.3,
                        "linkage": 'right',
                        "split_info": None
                    }
                }
            },
        ]
    }
    # from_dict/to_dict round-trips exactly.
    boosting_tree = BoostingTree.from_dict(trees)
    tree_dict = boosting_tree.to_dict()
    assert tree_dict == trees
    # Slicing is supported; scalar indexing is not.
    boosting_tree[:1]
    boosting_tree[1:]
    with pytest.raises(TypeError):
        boosting_tree[1]
    assert len(boosting_tree) == 2
    # Appending a tree extends lr and max_depth in lockstep.
    boosting_tree.append(tree=Tree.from_dict(trees["trees"][1]),
                         lr=0.1,
                         max_depth=3)
    assert len(boosting_tree) == 3
    assert boosting_tree.lr == [0.3, 0.3, 0.1]
    assert boosting_tree.max_depth == [5, 5, 3]
    res = boosting_tree.to_dict(suggest_threshold=0.6,
                                compute_group=True)
    assert res["suggest_threshold"] == 0.6
    # NodeDict: dict-like container of Node objects with its own round-trip.
    input_node_dict = {
        "a1111": {
            "id": "a1111",
            "depth": 0,
            "left_node_id": "a2222",
            "right_node_id": "a3333",
            "is_leaf": False,
            "weight": None,
            "linkage": None,
            "split_info": {
                "owner_id": "B",
                "feature_idx": 7,
                "is_category": True,
                "split_point": None,
                "left_cat": [2, 4, 8],
            }
        },
        "a2222": {
            "id": "a2222",
            "depth": 1,
            "left_node_id": None,
            "right_node_id": None,
            "is_leaf": True,
            "weight": 12.52,
            "linkage": 'left',
            "split_info": None
        },
        "a3333": {
            "id": "a3333",
            "depth": 1,
            "left_node_id": None,
            "right_node_id": None,
            "is_leaf": True,
            "weight": -12.3,
            "linkage": 'right',
            "split_info": None
        }
    }
    node_dict = NodeDict({k: Node.from_dict(v) for k, v in input_node_dict.items()})
    node_dict.update(
        {
            "a2224": Node.from_dict(
                {
                    "id": "a2224",
                    "depth": 1,
                    "left_node_id": None,
                    "right_node_id": None,
                    "is_leaf": True,
                    "weight": 12.52,
                    "linkage": 'left',
                    "split_info": None
                }
            )
        }
    )
    assert len(node_dict) == 4
    out = node_dict.to_dict()
    node_dict.from_dict(out)
XFL | XFL-master/test/algorithm/core/tree/test_goss.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
from algorithm.core.tree.goss import Goss
# Valid (alpha, beta) sampling-fraction pairs accepted by Goss.
normal_data = [
    (0.5, 0.4),
    (0.3, 0.7),
    (0, 0.5),
    (0.5, 0)
]
# (alpha, beta) pairs that Goss.__init__ rejects with ValueError
# (out-of-range values or fractions summing past 1).
abnormal_data = [
    (0.5, 0.6),
    (1.2, 0.5),
    (0.5, 1.2),
    (-1, 0.5),
    (0.5, -1)
]
# Fractions so small that sampling selects nothing and must raise.
abnormal_data2 = [
    (0.0001, 0.0002)
]
class TestGoss():
    """Tests for Goss sampling: alpha = top-gradient fraction, beta = random
    fraction (per the size assertions in test_normal)."""
    def setup_class(self):
        # 2000 random gradients/hessians shared by all tests in this class.
        self.size = 2000
        self.g = np.random.random((self.size,))
        self.h = np.random.random((self.size,))
    def teardown_class(self):
        pass
    @pytest.mark.parametrize("alpha, beta", normal_data)
    def test_normal(self, alpha, beta):
        """Valid fractions: sample counts and index uniqueness hold."""
        goss = Goss(alpha, beta)
        selected_idx = goss.sampling(self.g)
        goss.update_gradients(self.g, self.h)
        assert len(selected_idx) == int(self.size * alpha) + int(self.size *beta)
        assert len(goss.rand_set_idx) == int(self.size * beta)
        assert len(np.unique(selected_idx)) == len(selected_idx)
        assert len(np.unique(goss.rand_set_idx)) == len(goss.rand_set_idx)
    @pytest.mark.parametrize("alpha, beta", abnormal_data)
    def test_abnormal_1(self, alpha, beta):
        # Invalid fractions are rejected at construction time.
        with pytest.raises(ValueError):
            Goss(alpha, beta)
    @pytest.mark.parametrize("alpha, beta", abnormal_data2)
    def test_abnormal_2(self, alpha, beta):
        # Construction succeeds, but sampling with near-zero fractions raises.
        goss = Goss(alpha, beta)
        with pytest.raises(ValueError):
            goss.sampling(self.g)
XFL | XFL-master/test/algorithm/core/tree/test_feature_importance.py | from algorithm.core.tree.feature_importance import FeatureImportance
class TestFeatureImportance(object):
    """FeatureImportance: 'gain' mode compares by gain, 'split' mode by
    split count; instances support +, ordering, and formatting."""
    def test_split_importance(self):
        fi1 = FeatureImportance(0.1, main_type='gain')
        fi2 = FeatureImportance(0.2, main_type='gain')
        fi3 = FeatureImportance(0, 1, main_type='split')
        fi4 = FeatureImportance(0, 2, main_type='split')
        # get() returns the value of the main type.
        assert fi1.get() == 0.1
        assert fi3.get() == 1
        assert fi1 == fi1
        assert fi1 < fi2
        assert fi3 < fi4
        assert fi3 == fi3
        # test add
        fi5 = fi3 + fi4
        assert fi5.importance_split == 3
        fi3.add_split(1)
        assert fi3.get() == 2
        fi3.add_gain(0.1)
        assert fi3.importance_gain == 0.1
        # __format__/__str__ renders the main-type value.
        assert "{}".format(fi3) == "importance: 2"
        assert "{}".format(fi1) == "importance: 0.1"
        # Sorting relies on the comparison operators above.
        fi_list = [fi1, fi2]
        fi_list = sorted(fi_list, reverse=True)
        assert fi_list[0] == fi2
        fi_list = [fi3, fi4]
        fi_list = sorted(fi_list)
        assert fi_list[0] == fi3
| 898 | 27.09375 | 68 | py |
XFL | XFL-master/test/algorithm/common/evaluation/test_metrics.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import numpy as np
import pandas as pd
import pytest
from common.evaluation.metrics import DecisionTable, ThresholdCutter, BiClsMetric
@pytest.fixture()
def env():
    """Create the unit-test checkpoint directory, removing it after the test."""
    workdir = "/opt/checkpoints/unit_test"
    if not os.path.exists(workdir):
        os.mkdir(workdir)
    yield
    # Clean up everything the test wrote under the scratch directory.
    if os.path.exists(workdir):
        shutil.rmtree(workdir)
class TestBiClsMetric:
    def test_biclsmetric(self):
        """The configured loss is registered among the metric functions."""
        metric_config = {
            "acc": {},
            "precision": {},
            "recall": {},
            "f1_score": {},
            "auc": {},
            "ks": {}
        }
        loss_config = {
            "BCEWithLogitsLoss": {}
        }
        # Args: epoch index, output path, metric config, loss config.
        metric = BiClsMetric(1, "/opt/checkpoints/unit_test/metric.file", metric_config, loss_config)
        assert "BCEWithLogitsLoss" in metric.metric_functions
class TestDecisionTable:
    """DecisionTable binning/reporting and ThresholdCutter KS search."""
    @pytest.mark.parametrize('bins', [-1, 0, 1, 2, 5, 10, 50])
    def test_bins(self, bins, env):
        """bins <= 1 is rejected; otherwise the saved CSV has one row per bin
        with correct cumulative totals (columns are Chinese report headers)."""
        io_file_path = "/opt/checkpoints/decision_table_{}.csv".format(bins)
        config = {"bins": bins}
        if bins <= 1:
            with pytest.raises(ValueError) as e:
                dt = DecisionTable(config)
        else:
            dt = DecisionTable(config)
            # prepare a random test data
            y_true = np.array([1] * 50000 + [0] * 50000)
            y_pred = np.random.normal(0, 1, size=100000)
            np.random.shuffle(y_true)
            dt.fit(y_true, y_pred)
            assert len(dt.stats) == bins
            dt.save(io_file_path)
            # read_from_file
            df = pd.read_csv(io_file_path)
            assert len(df) == bins
            assert (df["样本数"] == (100000 / bins)).all()
            assert df.iloc[-1]["累计总样本数"] == 100000
            assert df.iloc[-1]["累计负样本数"] == 50000
            assert df.iloc[-1]["累计负样本/负样本总数"] == "100.00%"
            assert df.iloc[-1]["累计正样本/正样本总数"] == "100.00%"
            assert df.iloc[-1]["累计负样本/累计总样本"] == "50.00%"
    @pytest.mark.parametrize("method", ["equal_frequency", "equal_width", 'other'])
    def test_methods(self, method, env):
        """Only equal_frequency / equal_width binning are implemented."""
        io_file_path = "/opt/checkpoints/decision_table_{}.csv".format(method)
        config = {"method": method}
        if method not in ("equal_frequency", "equal_width"):
            with pytest.raises(NotImplementedError) as e:
                dt = DecisionTable(config)
        else:
            dt = DecisionTable(config)
            # prepare a random test data
            y_true = np.array([1] * 50000 + [0] * 50000)
            y_pred = np.random.normal(0, 1, size=100000)
            np.random.shuffle(y_true)
            dt.fit(y_true, y_pred)
            dt.save(io_file_path)
            # read_from_file
            df = pd.read_csv(io_file_path)
            if method == "equal_frequency":
                # Equal-frequency: every bin holds the same number of samples.
                assert (df["样本数"] == 100000 / dt.bins).all()
            elif method == "equal_width":
                # Equal-width: first interval spans [min, min + width].
                max_value, min_value = y_pred.max(), y_pred.min()
                interval = (max_value - min_value) / dt.bins
                left = float(df["区间"].iloc[0].strip("(]").split(', ')[0])
                right = float(df["区间"].iloc[0].strip("(]").split(', ')[1])
                assert left <= min_value
                np.testing.assert_almost_equal(right, min_value + interval, decimal=2)
            else:
                raise NotImplementedError("test failed.")
    def test_threshold_cutter_by_value(self):
        """cut_by_value finds KS=0.6 at threshold 0.75 on this ramp and saves
        a per-threshold KS curve."""
        io_file_path = "/opt/checkpoints/ks_plot.csv"
        y = [1] * 100 + [0] * 400 + [1] * 400 + [0] * 100
        p = np.arange(0.5, 1, 0.0005)
        tc = ThresholdCutter(io_file_path)
        tc.cut_by_value(y, p)
        np.testing.assert_almost_equal(tc.bst_score, 0.6, decimal=3)
        np.testing.assert_almost_equal(tc.bst_threshold, 0.75, decimal=3)
        tc.save()
        df = pd.read_csv(io_file_path)
        assert (df["ks"] >= 0).all()
        assert (df["ks"] <= 1.0).all()
    def test_threshold_cutter_by_index(self):
        """cut_by_index reaches the same optimum as cut_by_value."""
        y = [1] * 100 + [0] * 400 + [1] * 400 + [0] * 100
        p = np.arange(0.5, 1, 0.0005)
        tc = ThresholdCutter()
        tc.cut_by_index(y, p)
        np.testing.assert_almost_equal(tc.bst_score, 0.6, decimal=3)
        np.testing.assert_almost_equal(tc.bst_threshold, 0.75, decimal=3)
| 4,957 | 35.455882 | 101 | py |
XFL | XFL-master/test/algorithm/framework/horizontal/test_h_poisson_regression.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import shutil
from random import SystemRandom
import pickle
import numpy as np
import pandas as pd
import pytest
import torch
from gmpy2 import powmod
from service.fed_config import FedConfig
from service.fed_node import FedNode
from algorithm.framework.horizontal.poisson_regression.assist_trainer import HorizontalPoissonRegressionAssistTrainer
from algorithm.framework.horizontal.poisson_regression.label_trainer import HorizontalPoissonRegressionLabelTrainer
from algorithm.core.horizontal.aggregation.aggregation_otp import AggregationOTPRoot, AggregationOTPLeaf
from algorithm.core.horizontal.aggregation.aggregation_plain import AggregationPlainRoot, AggregationPlainLeaf
from common.communication.gRPC.python.channel import DualChannel
from common.communication.gRPC.python.commu import Commu
from common.crypto.key_agreement.contants import primes_hex
MOV = b"@" # middle of value
EOV = b"&" # end of value
def prepare_data():
    """Build a synthetic Poisson-regression dataset and write the 800/200
    train/test CSV splits under /opt/dataset/unit_test (no index column)."""
    np.random.seed(42)
    case_df = pd.DataFrame({
        'x0': np.random.random(1000) + 0.5,
        'x1': [0] * 1000,
        'x2': np.random.random(1000) + 1.0,
        'x3': np.random.random(1000),
        'x4': np.random.random(1000) + 1.0
    })
    feat_mat = case_df.values
    coef = np.array([1, 0, 1, 3, 0])
    # Diagnostic prints of the theoretical linear term and its exponential.
    lin_theo = np.dot(feat_mat, coef)
    print(f"Max of lin_theo: {lin_theo.max()}")
    print(f"Min of lin_theo: {lin_theo.min()}")
    theore_pred = np.exp(lin_theo)
    print(f"Theoretical pred: {theore_pred}")
    print(f"Min theoretical pred: {theore_pred.min()}")
    print(f"Min of case_df: {case_df.min(axis=0)}")
    # Count label: rounded exponential of a linear predictor (x3 weight is 2 here,
    # deliberately different from the printed theoretical coefficients above).
    case_df['y'] = np.rint(
        np.exp(case_df['x0'] + case_df['x2'] + 2 * case_df['x3'])
    )
    case_df = case_df[['y', 'x0', 'x1', 'x2', 'x3', 'x4']]
    case_df.head(800).to_csv(
        "/opt/dataset/unit_test/train_data.csv", index=False
    )
    case_df.tail(200).to_csv(
        "/opt/dataset/unit_test/test_data.csv", index=False
    )
@pytest.fixture()
def get_assist_trainer_conf():
    """Yield the stock assist-trainer config for horizontal poisson regression."""
    conf_path = "python/algorithm/config/horizontal_poisson_regression/assist_trainer.json"
    with open(conf_path) as conf_file:
        conf = json.load(conf_file)
    yield conf
@pytest.fixture()
def get_trainer_conf():
    """Yield the stock label-trainer config for horizontal poisson regression."""
    conf_path = "python/algorithm/config/horizontal_poisson_regression/trainer.json"
    with open(conf_path) as conf_file:
        conf = json.load(conf_file)
    yield conf
@pytest.fixture(scope="module", autouse=True)
def env():
    """Create the unit-test data/checkpoint dirs and dataset, then clean up after the module."""
    for directory in ("/opt/dataset/unit_test", "/opt/checkpoints/unit_test"):
        if not os.path.exists(directory):
            os.makedirs(directory)
    prepare_data()
    yield
    for directory in ("/opt/dataset/unit_test", "/opt/checkpoints/unit_test"):
        if os.path.exists(directory):
            shutil.rmtree(directory)
class TestPoissonRegression:
    """Smoke test: run one fit() on the poisson-regression label and assist trainers
    with all gRPC channels and aggregation mocked out."""
    @pytest.mark.parametrize("encryption_method", ['plain', 'otp'])
    def test_trainer(self, get_trainer_conf, get_assist_trainer_conf, encryption_method, mocker):
        """Exercise both trainer roles under 'plain' and 'otp' aggregation encryption."""
        fed_method = None
        fed_assist_method = None
        # Fake a two-trainer + assist-trainer federation topology.
        mocker.patch.object(Commu, "node_id", "assist_trainer")
        Commu.trainer_ids = ['node-1', 'node-2']
        Commu.scheduler_id = 'assist_trainer'
        conf = get_trainer_conf
        assist_conf = get_assist_trainer_conf
        mocker.patch.object(
            FedConfig, "get_label_trainer", return_value=['node-1', 'node-2']
        )
        mocker.patch.object(
            FedConfig, "get_assist_trainer", return_value='assist_trainer'
        )
        mocker.patch.object(FedConfig, "node_id", 'node-1')
        mocker.patch.object(FedNode, "node_id", "node-1")
        # Select the encryption section of the aggregation config under test.
        if encryption_method == "plain":
            assist_conf["train_info"]["train_params"]["encryption"] = {"plain": {}}
            sec_conf = assist_conf["train_info"]["train_params"]["encryption"]["plain"]
        else:
            sec_conf = assist_conf["train_info"]["train_params"]["encryption"]["otp"]
        if encryption_method == "plain":
            fed_method = AggregationPlainLeaf(sec_conf)
            fed_assist_method = AggregationPlainRoot(sec_conf)
        elif encryption_method == "otp":
            # Stub the Diffie-Hellman key agreement used by the OTP channel.
            mocker.patch.object(DualChannel, "__init__", return_value=None)
            DualChannel.remote_id = "node-2"
            supported_shortest_exponents = [225, 275, 325, 375, 400]
            shorest_exponent = supported_shortest_exponents[1]
            lower_bound = 1 << (supported_shortest_exponents[1] - 1)
            upper_bound = 1 << shorest_exponent
            primes = [int(p.replace(' ', ''), 16) for p in primes_hex]
            rand_num_generator = SystemRandom()
            a = rand_num_generator.randint(lower_bound, upper_bound)
            g_power_a = powmod(2, a, primes[1])
            mocker.patch.object(DualChannel, "swap",
                                return_value=(1, g_power_a))
            mocker.patch.object(Commu, "node_id", "node-1")
            fed_method = AggregationOTPLeaf(sec_conf)
            fed_assist_method = AggregationOTPRoot(sec_conf)
        print(f"trainer conf: {json.dumps(conf)}")
        # Neutralize the dual channel; the first recv() hands the assist config over.
        mocker.patch.object(
            DualChannel, "__init__", return_value=None
        )
        mocker.patch.object(
            DualChannel, "send", return_value=None
        )
        recv_mocker = mocker.patch.object(
            DualChannel, "recv",
            return_value = {
                "model_info":assist_conf["model_info"], "train_info": assist_conf["train_info"]
            }
        )
        prt = HorizontalPoissonRegressionLabelTrainer(conf)
        # Deterministic starting weights on both sides.
        prt.model.linear.weight = torch.nn.parameter.Parameter(
            torch.tensor([[1.0, 0.0, 1.0, 1.0, 0.0]]))
        prt.model.linear.bias = torch.nn.parameter.Parameter(
            torch.tensor([0.0]))
        prt_a = HorizontalPoissonRegressionAssistTrainer(assist_conf)
        prt_a.model.linear.weight = torch.nn.parameter.Parameter(
            torch.tensor([[1.0, 0.0, 0.0, 0.0, 0.0]]))
        print(prt_a.model.linear.weight)
        prt_a.model.linear.bias = torch.nn.parameter.Parameter(
            torch.tensor([0.0]))
        # Pre-serialized payloads replayed by the mocked recv():
        # early-stop flag and the assist model's parameters.
        esflag_recv = pickle.dumps(False) + EOV
        params_plain_recv = pickle.dumps(prt_a.model.state_dict()) + EOV
        print("param plain received")
        print(params_plain_recv)
        print("param plain received loaded")
        print(pickle.loads(params_plain_recv))
        params_send = fed_method._calc_upload_value(
            prt.model.state_dict(), len(prt.train_dataloader.dataset))
        params_collect = pickle.dumps(params_send)
        print(f"Params collect: {params_collect}")
        print(f"Loaded params: {pickle.loads(params_collect)}")
        print()
        # agg_otp = fed_assist_method._calc_aggregated_params(
        #     list(map(lambda x: pickle.loads(x), [params_collect, params_collect])))
        # print(f"agg otp: {agg_otp}")
        agg_otp = prt_a.model.state_dict()
        def mock_recv(*args, **kwargs):
            # Alternate: odd calls return the early-stop flag, even calls the params.
            if recv_mocker.call_count % 2 == 1:
                return esflag_recv
            else:
                return params_plain_recv
        def mock_agg(*args, **kwargs):
            return agg_otp
        recv_mocker = mocker.patch.object(
            DualChannel, "recv", side_effect=mock_recv
        )
        mocker.patch.object(
            AggregationOTPRoot, "aggregate", side_effect=mock_agg
        )
        mocker.patch.object(
            AggregationPlainRoot, "aggregate", side_effect=mock_agg
        )
        mocker.patch("service.fed_control._send_progress")
        print(prt.model)
        prt.model.linear.weight = torch.nn.parameter.Parameter(
            torch.tensor([[1.0, 0.0, 1.0, 1.0, 0.0]]))
        prt.model.linear.bias = torch.nn.parameter.Parameter(
            torch.tensor([0.0]))
        prt.fit()
        print("Successfully tested label trainer")
        prt_a.model.linear.weight = torch.nn.parameter.Parameter(
            torch.tensor([[1.0, 0.0, 0.0, 0.0, 0.0]]))
        print(prt_a.model.linear.weight)
        prt_a.model.linear.bias = torch.nn.parameter.Parameter(
            torch.tensor([0.0]))
        print(prt_a.model.linear.bias)
        prt_a.fit()
| 8,853 | 39.063348 | 117 | py |
XFL | XFL-master/test/algorithm/framework/horizontal/test_h_vgg.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import shutil
from random import SystemRandom
import pickle
import numpy as np
import pytest
from gmpy2 import powmod
from service.fed_config import FedConfig
from service.fed_node import FedNode
from algorithm.core.horizontal.aggregation.aggregation_otp import AggregationOTPRoot, AggregationOTPLeaf
from algorithm.core.horizontal.aggregation.aggregation_plain import AggregationPlainRoot, AggregationPlainLeaf
from algorithm.framework.horizontal.vgg.assist_trainer import HorizontalVggAssistTrainer
from algorithm.framework.horizontal.vgg.label_trainer import HorizontalVggLabelTrainer
from common.communication.gRPC.python.channel import DualChannel
from common.communication.gRPC.python.commu import Commu
from common.crypto.key_agreement.contants import primes_hex
MOV = b"@" # middle of value
EOV = b"&" # end of value
def prepare_data():
    """Write random 32x32x3 image npz splits (32 train / 32 test samples, 10 classes)."""
    np.random.seed(0)
    images = np.random.randint(256, size=(64, 32, 32, 3))
    targets = np.random.randint(10, size=64)
    np.savez("/opt/dataset/unit_test/test_data.npz",
             data=images[:32], labels=targets[:32])
    np.savez("/opt/dataset/unit_test/train_data.npz",
             data=images[32:64], labels=targets[32:64])
@pytest.fixture()
def get_assist_trainer_conf():
    """Yield the stock assist-trainer config for horizontal VGG."""
    conf_path = "python/algorithm/config/horizontal_vgg/assist_trainer.json"
    with open(conf_path) as conf_file:
        conf = json.load(conf_file)
    yield conf
@pytest.fixture()
def get_trainer_conf():
    """Yield the stock label-trainer config for horizontal VGG."""
    conf_path = "python/algorithm/config/horizontal_vgg/trainer.json"
    with open(conf_path) as conf_file:
        conf = json.load(conf_file)
    yield conf
@pytest.fixture(scope="module", autouse=True)
def env():
    """Create the unit-test data/checkpoint dirs and dataset, then clean up after the module."""
    for directory in ("/opt/dataset/unit_test", "/opt/checkpoints/unit_test"):
        if not os.path.exists(directory):
            os.makedirs(directory)
    prepare_data()
    yield
    for directory in ("/opt/dataset/unit_test", "/opt/checkpoints/unit_test"):
        if os.path.exists(directory):
            shutil.rmtree(directory)
class TestVgg:
    """Smoke test: run one fit() on the horizontal VGG label and assist trainers
    with all gRPC channels and aggregation mocked out."""
    @pytest.mark.parametrize("encryption_method", ['plain']) # ['otp', 'plain'] otp too slow
    def test_trainer(self, get_trainer_conf, get_assist_trainer_conf, encryption_method, mocker):
        """Exercise both trainer roles; only 'plain' is parametrized (otp too slow)."""
        fed_method = None
        fed_assist_method = None
        # Fake a two-trainer + assist-trainer federation topology.
        mocker.patch.object(Commu, "node_id", "assist_trainer")
        Commu.trainer_ids = ['node-1', 'node-2']
        Commu.scheduler_id = 'assist_trainer'
        conf = get_trainer_conf
        assist_conf = get_assist_trainer_conf
        mocker.patch.object(
            FedConfig, "get_label_trainer", return_value=['node-1', 'node-2']
        )
        mocker.patch.object(
            FedConfig, "get_assist_trainer", return_value='assist_trainer'
        )
        mocker.patch.object(FedNode, "node_id", "node-1")
        mocker.patch.object(FedConfig, "node_id", 'node-1')
        # Select the encryption section of the aggregation config under test.
        if encryption_method == "plain":
            assist_conf["train_info"]["train_params"]["encryption"] = {"plain": {}}
            sec_conf = assist_conf["train_info"]["train_params"]["encryption"]["plain"]
        else:
            sec_conf = assist_conf["train_info"]["train_params"]["encryption"]["otp"]
        if encryption_method == "plain":
            fed_method = AggregationPlainLeaf(sec_conf)
            fed_assist_method = AggregationPlainRoot(sec_conf)
        elif encryption_method == "otp":
            # Stub the Diffie-Hellman key agreement used by the OTP channel.
            mocker.patch.object(DualChannel, "__init__", return_value=None)
            # dc = DualChannel(name="otp_diffie_hellman", ids=['node-1', 'node-2'])
            DualChannel.remote_id = "node-2"
            supported_shortest_exponents = [225, 275, 325, 375, 400]
            shorest_exponent = supported_shortest_exponents[1]
            lower_bound = 1 << (supported_shortest_exponents[1] - 1)
            upper_bound = 1 << shorest_exponent
            primes = [int(p.replace(' ', ''), 16) for p in primes_hex]
            rand_num_generator = SystemRandom()
            a = rand_num_generator.randint(lower_bound, upper_bound)
            g_power_a = powmod(2, a, primes[1])
            mocker.patch.object(DualChannel, "swap", return_value=(1, g_power_a))
            Commu.node_id = "node-1"
            fed_method = AggregationOTPLeaf(sec_conf)
            fed_assist_method = AggregationOTPRoot(sec_conf)
        # Neutralize the dual channel; the first recv() hands the assist config over.
        mocker.patch.object(
            DualChannel, "__init__", return_value=None
        )
        mocker.patch.object(
            DualChannel, "send", return_value=None
        )
        recv_mocker = mocker.patch.object(
            DualChannel, "recv",
            return_value = {
                "model_info":assist_conf["model_info"], "train_info": assist_conf["train_info"]
            }
        )
        rest = HorizontalVggLabelTrainer(conf)
        rest_a = HorizontalVggAssistTrainer(assist_conf)
        # Pre-serialized payloads replayed by the mocked recv():
        # early-stop flag and the assist model's parameters.
        esflag_recv = pickle.dumps(False) + EOV
        params_plain_recv = pickle.dumps(rest_a.model.state_dict()) + EOV
        params_send = fed_method._calc_upload_value(
            rest.model.state_dict(), len(rest.train_dataloader))
        params_collect = pickle.dumps(params_send)
        agg_otp = fed_assist_method._calc_aggregated_params(list(map(lambda x: pickle.loads(x), [params_collect,params_collect])))
        def mock_recv(*args, **kwargs):
            # Alternate: odd calls return the early-stop flag, even calls the params.
            if recv_mocker.call_count % 2 == 1:
                return esflag_recv
            else:
                return params_plain_recv
        def mock_agg(*args, **kwargs):
            return agg_otp
        recv_mocker = mocker.patch.object(
            DualChannel, "recv", side_effect=mock_recv
        )
        mocker.patch.object(
            AggregationOTPRoot, "aggregate", side_effect=mock_agg
        )
        mocker.patch.object(
            AggregationPlainRoot, "aggregate", side_effect=mock_agg
        )
        mocker.patch("service.fed_control._send_progress")
        rest.fit()
        rest_a.fit()
| 6,686 | 38.803571 | 130 | py |
XFL | XFL-master/test/algorithm/framework/horizontal/test_h_vgg_jax.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import shutil
from random import SystemRandom
import pickle
import numpy as np
import pytest
import service.fed_config
from algorithm.core.horizontal.aggregation.aggregation_otp import AggregationOTPRoot, AggregationOTPLeaf
from algorithm.core.horizontal.aggregation.aggregation_plain import AggregationPlainRoot, AggregationPlainLeaf
from common.communication.gRPC.python.channel import DualChannel
from common.communication.gRPC.python.commu import Commu
from common.crypto.key_agreement.contants import primes_hex
from gmpy2 import powmod
MOV = b"@" # middle of value
EOV = b"&" # end of value
def prepare_data():
    """Write random 32x32x3 image npz splits (32 train / 32 test samples, 10 classes)."""
    np.random.seed(0)
    images = np.random.randint(256, size=(64, 32, 32, 3))
    targets = np.random.randint(10, size=64)
    np.savez("/opt/dataset/unit_test/test_data.npz",
             data=images[:32], labels=targets[:32])
    np.savez("/opt/dataset/unit_test/train_data.npz",
             data=images[32:64], labels=targets[32:64])
@pytest.fixture()
def get_assist_trainer_conf():
    """Assist-trainer config for vgg_jax, rewired to unit-test paths and a tiny run."""
    with open("python/algorithm/config/horizontal_vgg_jax/assist_trainer.json") as conf_file:
        conf = json.load(conf_file)
    val_input = conf["input"]["valset"][0]
    val_input["path"] = "/opt/dataset/unit_test"
    val_input["name"] = "test_data.npz"
    # All outputs land in the disposable checkpoint dir.
    for section in ("model", "metrics", "evaluation"):
        conf["output"][section]["path"] = "/opt/checkpoints/unit_test"
    conf["model_info"]["config"]["layers"] = "unit_test"
    train_params = conf["train_info"]["params"]
    train_params["batch_size"] = 8
    train_params["global_epoch"] = 2
    yield conf
@pytest.fixture()
def get_trainer_conf():
    """Label-trainer config for vgg_jax, rewired to unit-test paths and a tiny run."""
    with open("python/algorithm/config/horizontal_vgg_jax/trainer.json") as conf_file:
        conf = json.load(conf_file)
    train_input = conf["input"]["trainset"][0]
    train_input["path"] = "/opt/dataset/unit_test"
    train_input["name"] = "train_data.npz"
    # All outputs land in the disposable checkpoint dir.
    for section in ("metrics", "evaluation"):
        conf["output"][section]["path"] = "/opt/checkpoints/unit_test"
    conf["model_info"]["config"]["layers"] = "unit_test"
    train_params = conf["train_info"]["params"]
    train_params["batch_size"] = 8
    train_params["global_epoch"] = 2
    yield conf
@pytest.fixture(scope="module", autouse=True)
def env():
    """Create the unit-test data/checkpoint dirs and dataset, then clean up after the module."""
    for directory in ("/opt/dataset/unit_test", "/opt/checkpoints/unit_test"):
        if not os.path.exists(directory):
            os.makedirs(directory)
    prepare_data()
    yield
    for directory in ("/opt/dataset/unit_test", "/opt/checkpoints/unit_test"):
        if os.path.exists(directory):
            shutil.rmtree(directory)
class TestVgg:
    """Smoke test: run one fit() on the JAX VGG label and assist trainers
    with all gRPC channels and aggregation mocked out."""
    @pytest.mark.parametrize("encryption_method", ['plain']) # ['otp', 'plain'] otp too slow
    def test_trainer(self, get_trainer_conf, get_assist_trainer_conf, encryption_method, mocker):
        """Exercise both trainer roles; only 'plain' is parametrized (otp too slow)."""
        fed_method = None
        fed_assist_method = None
        # Fake a two-trainer + assist-trainer federation topology.
        mocker.patch.object(Commu, "node_id", "assist_trainer")
        Commu.trainer_ids = ['node-1', 'node-2']
        Commu.scheduler_id = 'assist_trainer'
        conf = get_trainer_conf
        assist_conf = get_assist_trainer_conf
        mocker.patch.object(
            service.fed_config.FedConfig, "get_label_trainer", return_value=['node-1', 'node-2']
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_assist_trainer", return_value='assist_trainer'
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "node_id", 'node-1'
        )
        # Select the encryption section of the aggregation config under test.
        # NOTE(review): for 'otp' sec_conf would be unbound here — only 'plain'
        # is parametrized above, so the branch is never exercised.
        if encryption_method == "plain":
            conf["train_info"]["params"]["aggregation_config"]["encryption"] = {"plain": {}}
            assist_conf["train_info"]["params"]["aggregation_config"]["encryption"] = {"plain": {}}
            sec_conf = conf["train_info"]["params"]["aggregation_config"]["encryption"]
        # These two defs are shadowed by the later definitions below.
        def mock_recv(*args, **kwargs):
            return params_plain_recv
        def mock_agg(*args, **kwargs):
            return agg_otp
        if encryption_method == "plain":
            fed_method = AggregationPlainLeaf(sec_conf)
            fed_assist_method = AggregationPlainRoot(sec_conf)
        elif encryption_method == "otp":
            # Stub the Diffie-Hellman key agreement used by the OTP channel.
            mocker.patch.object(DualChannel, "__init__", return_value=None)
            # dc = DualChannel(name="otp_diffie_hellman", ids=['node-1', 'node-2'])
            DualChannel.remote_id = "node-2"
            supported_shortest_exponents = [225, 275, 325, 375, 400]
            shorest_exponent = supported_shortest_exponents[1]
            lower_bound = 1 << (supported_shortest_exponents[1] - 1)
            upper_bound = 1 << shorest_exponent
            primes = [int(p.replace(' ', ''), 16) for p in primes_hex]
            rand_num_generator = SystemRandom()
            a = rand_num_generator.randint(lower_bound, upper_bound)
            g_power_a = powmod(2, a, primes[1])
            mocker.patch.object(DualChannel, "swap", return_value=(1, g_power_a))
            fed_method = AggregationOTPLeaf(sec_conf)
            fed_assist_method = AggregationOTPRoot(sec_conf)
        service.fed_config.FedConfig.stage_config = conf
        # Imported here so stage_config is set before the trainer modules load.
        from algorithm.framework.horizontal.vgg_jax.assist_trainer import HorizontalVggJaxAssistTrainer
        from algorithm.framework.horizontal.vgg_jax.label_trainer import HorizontalVggJaxLabelTrainer
        rest = HorizontalVggJaxLabelTrainer(conf)
        rest_a = HorizontalVggJaxAssistTrainer(assist_conf)
        # Pre-serialized payloads replayed by the mocked recv().
        params_plain_recv = pickle.dumps(rest_a.state_dict) + EOV
        params_send = fed_method._calc_upload_value(rest.state_dict, len(rest.train_dataloader.dataset))
        params_collect = pickle.dumps(params_send)
        agg_otp = fed_assist_method._calc_aggregated_params(list(map(lambda x: pickle.loads(x), [params_collect,params_collect])))
        def mock_recv(*args, **kwargs):
            # Cycle of four recv() calls: two parameter broadcasts, two uploads.
            if recv_mocker.call_count % 4 in [1,2]:
                return params_plain_recv
            elif recv_mocker.call_count % 4 in [0,3] :
                return params_collect
        recv_mocker = mocker.patch.object(
            DualChannel, "recv", side_effect=mock_recv
        )
        mocker.patch.object(
            DualChannel, "__init__", return_value=None
        )
        mocker.patch.object(
            DualChannel, "send", return_value=None
        )
        mocker.patch.object(
            AggregationOTPRoot, "aggregate", side_effect=mock_agg
        )
        mocker.patch.object(
            AggregationPlainRoot, "aggregate", side_effect=mock_agg
        )
        mocker.patch("service.fed_control._send_progress")
        rest.fit()
        rest_a.fit()
| 7,515 | 41.948571 | 130 | py |
XFL | XFL-master/test/algorithm/framework/horizontal/test_h_linear_regression.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import shutil
from random import SystemRandom
import pickle
import numpy as np
import pandas as pd
import pytest
from service.fed_config import FedConfig
from service.fed_node import FedNode
from algorithm.framework.horizontal.linear_regression.assist_trainer import HorizontalLinearRegressionAssistTrainer
from algorithm.framework.horizontal.linear_regression.label_trainer import HorizontalLinearRegressionLabelTrainer
from algorithm.core.horizontal.aggregation.aggregation_otp import AggregationOTPRoot, AggregationOTPLeaf
from algorithm.core.horizontal.aggregation.aggregation_plain import AggregationPlainRoot, AggregationPlainLeaf
from common.communication.gRPC.python.channel import DualChannel
from common.communication.gRPC.python.commu import Commu
from common.crypto.key_agreement.contants import primes_hex
from gmpy2 import powmod
MOV = b"@" # middle of value
EOV = b"&" # end of value
def prepare_data():
    """Generate a synthetic linear-regression dataset and write CSV splits.

    1000 samples with 5 features (x1 is a constant-zero column, x4 carries no
    signal); the label is exactly y = x0 + x2 + x3. The first 800 rows go to
    /opt/dataset/unit_test/train_data.csv and the last 200 to test_data.csv,
    both with the index column kept (index=True).
    """
    # Seed the RNG so the generated dataset — and therefore the test run —
    # is reproducible, consistent with the sibling horizontal test modules
    # (poisson seeds with 42, vgg with 0); previously this was unseeded.
    np.random.seed(42)
    case_df = pd.DataFrame({
        'x0': np.random.random(1000),
        'x1': [0] * 1000,
        'x2': 2 * np.random.random(1000) + 1.0,
        'x3': 3 * np.random.random(1000) - 1.0,
        'x4': np.random.random(1000)
    })
    case_df['y'] = case_df['x0'] + case_df['x2'] + case_df['x3']
    case_df = case_df[['y', 'x0', 'x1', 'x2', 'x3', 'x4']]
    case_df.head(800).to_csv(
        "/opt/dataset/unit_test/train_data.csv", index=True
    )
    case_df.tail(200).to_csv(
        "/opt/dataset/unit_test/test_data.csv", index=True
    )
@pytest.fixture()
def get_assist_trainer_conf():
    """Yield the stock assist-trainer config for horizontal linear regression."""
    conf_path = "python/algorithm/config/horizontal_linear_regression/assist_trainer.json"
    with open(conf_path) as conf_file:
        conf = json.load(conf_file)
    yield conf
@pytest.fixture()
def get_trainer_conf():
    """Yield the stock label-trainer config for horizontal linear regression."""
    conf_path = "python/algorithm/config/horizontal_linear_regression/trainer.json"
    with open(conf_path) as conf_file:
        conf = json.load(conf_file)
    yield conf
@pytest.fixture(scope="module", autouse=True)
def env():
    """Create the unit-test data/checkpoint dirs and dataset, then clean up after the module."""
    for directory in ("/opt/dataset/unit_test", "/opt/checkpoints/unit_test"):
        if not os.path.exists(directory):
            os.makedirs(directory)
    prepare_data()
    yield
    for directory in ("/opt/dataset/unit_test", "/opt/checkpoints/unit_test"):
        if os.path.exists(directory):
            shutil.rmtree(directory)
class TestLogisticRegression:
    """Smoke test: run one fit() on the *linear*-regression label and assist trainers
    with all gRPC channels and aggregation mocked out.

    NOTE(review): the class name says Logistic but this file tests linear
    regression — looks like a copy-paste leftover; confirm before renaming
    (the Test* name is what pytest discovers).
    """
    @pytest.mark.parametrize("encryption_method", ['plain','otp'])
    def test_trainer(self, get_trainer_conf, get_assist_trainer_conf, encryption_method, mocker):
        """Exercise both trainer roles under 'plain' and 'otp' aggregation encryption."""
        fed_method = None
        fed_assist_method = None
        # Fake a two-trainer + assist-trainer federation topology.
        mocker.patch.object(Commu, "node_id", "assist_trainer")
        Commu.trainer_ids = ['node-1', 'node-2']
        Commu.scheduler_id = 'assist_trainer'
        conf = get_trainer_conf
        assist_conf = get_assist_trainer_conf
        mocker.patch.object(
            FedConfig, "get_label_trainer", return_value=['node-1', 'node-2']
        )
        mocker.patch.object(
            FedConfig, "get_assist_trainer", return_value='assist_trainer'
        )
        mocker.patch.object(FedConfig, "node_id", 'node-1')
        mocker.patch.object(FedNode, "node_id", "node-1")
        # Select the encryption section of the aggregation config under test.
        if encryption_method == "plain":
            assist_conf["train_info"]["train_params"]["encryption"] = {"plain": {}}
            sec_conf = assist_conf["train_info"]["train_params"]["encryption"]["plain"]
        else:
            sec_conf = assist_conf["train_info"]["train_params"]["encryption"]["otp"]
        if encryption_method == "plain":
            fed_method = AggregationPlainLeaf(sec_conf)
            fed_assist_method = AggregationPlainRoot(sec_conf)
        elif encryption_method == "otp":
            # Stub the Diffie-Hellman key agreement used by the OTP channel.
            mocker.patch.object(DualChannel, "__init__", return_value=None)
            # dc = DualChannel(name="otp_diffie_hellman", ids=['node-1', 'node-2'])
            DualChannel.remote_id = "node-2"
            supported_shortest_exponents = [225, 275, 325, 375, 400]
            shorest_exponent = supported_shortest_exponents[1]
            lower_bound = 1 << (supported_shortest_exponents[1] - 1)
            upper_bound = 1 << shorest_exponent
            primes = [int(p.replace(' ', ''), 16) for p in primes_hex]
            rand_num_generator = SystemRandom()
            a = rand_num_generator.randint(lower_bound, upper_bound)
            g_power_a = powmod(2, a, primes[1])
            mocker.patch.object(DualChannel, "swap", return_value=(1, g_power_a))
            Commu.node_id = "node-1"
            fed_method = AggregationOTPLeaf(sec_conf)
            fed_assist_method = AggregationOTPRoot(sec_conf)
        # Neutralize the dual channel; the first recv() hands the assist config over.
        mocker.patch.object(
            DualChannel, "__init__", return_value=None
        )
        mocker.patch.object(
            DualChannel, "send", return_value=None
        )
        recv_mocker = mocker.patch.object(
            DualChannel, "recv",
            return_value = {
                "model_info":assist_conf["model_info"], "train_info": assist_conf["train_info"]
            }
        )
        lrt = HorizontalLinearRegressionLabelTrainer(conf)
        lrt_a = HorizontalLinearRegressionAssistTrainer(assist_conf)
        # Pre-serialized payloads replayed by the mocked recv():
        # early-stop flag and the assist model's parameters.
        esflag_recv = pickle.dumps(False) + EOV
        params_plain_recv = pickle.dumps(lrt_a.model.state_dict()) + EOV
        params_send = fed_method._calc_upload_value(
            lrt.model.state_dict(), len(lrt.train_dataloader.dataset))
        params_collect = pickle.dumps(params_send)
        agg_otp = fed_assist_method._calc_aggregated_params(list(map(lambda x: pickle.loads(x), [params_collect,params_collect])))
        def mock_recv(*args, **kwargs):
            # Alternate: odd calls return the early-stop flag, even calls the params.
            if recv_mocker.call_count % 2 == 1:
                return esflag_recv
            else:
                return params_plain_recv
        def mock_agg(*args, **kwargs):
            return agg_otp
        recv_mocker = mocker.patch.object(
            DualChannel, "recv", side_effect=mock_recv
        )
        mocker.patch.object(
            AggregationOTPRoot, "aggregate", side_effect=mock_agg
        )
        mocker.patch.object(
            AggregationPlainRoot, "aggregate", side_effect=mock_agg
        )
        mocker.patch("service.fed_control._send_progress")
        lrt.fit()
        lrt_a.fit()
| 6,895 | 38.405714 | 130 | py |
XFL | XFL-master/test/algorithm/framework/horizontal/test_h_gcn_mol.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import shutil
from random import SystemRandom
import pickle
import pytest
from gmpy2 import powmod
from dgllife.data import HIV
from dgllife.utils import CanonicalAtomFeaturizer
from dgllife.utils import SMILESToBigraph
from sklearn.model_selection import train_test_split
from service.fed_config import FedConfig
from service.fed_node import FedNode
from algorithm.core.horizontal.aggregation.aggregation_otp import AggregationOTPRoot, AggregationOTPLeaf
from algorithm.core.horizontal.aggregation.aggregation_plain import AggregationPlainRoot, AggregationPlainLeaf
from algorithm.framework.horizontal.gcn_mol.assist_trainer import HorizontalGcnMolAssistTrainer
from algorithm.framework.horizontal.gcn_mol.label_trainer import HorizontalGcnMolLabelTrainer
from common.communication.gRPC.python.channel import DualChannel
from common.communication.gRPC.python.commu import Commu
from common.crypto.key_agreement.contants import primes_hex
MOV = b"@" # middle of value
EOV = b"&" # end of value
def prepare_data():
    """Featurize the HIV SMILES dataset into DGL graphs and write 70/30
    label/smiles CSV splits under /opt/dataset/unit_test."""
    atom_featurizer = CanonicalAtomFeaturizer()
    bond_featurizer = None  # CanonicalBondFeaturizer()
    to_graph = SMILESToBigraph(
        add_self_loop=True,
        node_featurizer=atom_featurizer,
        edge_featurizer=bond_featurizer
    )
    dataset = HIV(smiles_to_graph=to_graph, n_jobs=1,
                  cache_file_path="/opt/dataset/unit_test/dgl_hiv.bin")
    frame = dataset.df[['HIV_active', 'smiles']]
    train_part, test_part = train_test_split(frame, test_size=0.3, random_state=42)
    train_part.to_csv("/opt/dataset/unit_test/train_data.csv", index=False)
    test_part.to_csv("/opt/dataset/unit_test/test_data.csv", index=False)
@pytest.fixture()
def get_assist_trainer_conf():
    """Yield the stock assist-trainer config for horizontal GCN-mol."""
    conf_path = "python/algorithm/config/horizontal_gcn_mol/assist_trainer.json"
    with open(conf_path) as conf_file:
        conf = json.load(conf_file)
    yield conf
@pytest.fixture()
def get_trainer_conf():
    """Yield the stock label-trainer config for horizontal GCN-mol."""
    conf_path = "python/algorithm/config/horizontal_gcn_mol/trainer.json"
    with open(conf_path) as conf_file:
        conf = json.load(conf_file)
    yield conf
@pytest.fixture(scope="module", autouse=True)
def env():
    """Create the unit-test data/checkpoint dirs and dataset, then clean up after the module."""
    for directory in ("/opt/dataset/unit_test", "/opt/checkpoints/unit_test"):
        if not os.path.exists(directory):
            os.makedirs(directory)
    prepare_data()
    yield
    for directory in ("/opt/dataset/unit_test", "/opt/checkpoints/unit_test"):
        if os.path.exists(directory):
            shutil.rmtree(directory)
class TestGcnMol:
    """Smoke test: run one fit() on the GCN-mol label and assist trainers
    with all gRPC channels and aggregation mocked out."""
    # ['otp', 'plain'] otp too slow
    @pytest.mark.parametrize("encryption_method", ['plain'])
    def test_trainer(self, get_trainer_conf, get_assist_trainer_conf, encryption_method, mocker):
        """Exercise both trainer roles; only 'plain' is parametrized (otp too slow)."""
        fed_method = None
        fed_assist_method = None
        # Fake a two-trainer + assist-trainer federation topology.
        mocker.patch.object(Commu, "node_id", "assist_trainer")
        Commu.trainer_ids = ['node-1', 'node-2']
        Commu.scheduler_id = 'assist_trainer'
        conf = get_trainer_conf
        assist_conf = get_assist_trainer_conf
        mocker.patch.object(
            FedConfig, "get_label_trainer", return_value=['node-1', 'node-2']
        )
        mocker.patch.object(
            FedConfig, "get_assist_trainer", return_value='assist_trainer'
        )
        mocker.patch.object(FedNode, "node_id", "node-1")
        mocker.patch.object(FedConfig, "node_id", 'node-1')
        # Select the encryption section of the aggregation config under test.
        if encryption_method == "plain":
            assist_conf["train_info"]["train_params"]["encryption"] = {"plain": {}}
            sec_conf = assist_conf["train_info"]["train_params"]["encryption"]["plain"]
        else:
            sec_conf = assist_conf["train_info"]["train_params"]["encryption"]["otp"]
        if encryption_method == "plain":
            fed_method = AggregationPlainLeaf(sec_conf)
            fed_assist_method = AggregationPlainRoot(sec_conf)
        elif encryption_method == "otp":
            # Stub the Diffie-Hellman key agreement used by the OTP channel.
            mocker.patch.object(DualChannel, "__init__", return_value=None)
            # dc = DualChannel(name="otp_diffie_hellman", ids=['node-1', 'node-2'])
            DualChannel.remote_id = "node-2"
            supported_shortest_exponents = [225, 275, 325, 375, 400]
            shorest_exponent = supported_shortest_exponents[1]
            lower_bound = 1 << (supported_shortest_exponents[1] - 1)
            upper_bound = 1 << shorest_exponent
            primes = [int(p.replace(' ', ''), 16) for p in primes_hex]
            rand_num_generator = SystemRandom()
            a = rand_num_generator.randint(lower_bound, upper_bound)
            g_power_a = powmod(2, a, primes[1])
            mocker.patch.object(DualChannel, "swap",
                                return_value=(1, g_power_a))
            Commu.node_id = "node-1"
            fed_method = AggregationOTPLeaf(sec_conf)
            fed_assist_method = AggregationOTPRoot(sec_conf)
        # Neutralize the dual channel; the first recv() hands the assist config over.
        mocker.patch.object(
            DualChannel, "__init__", return_value=None
        )
        mocker.patch.object(
            DualChannel, "send", return_value=None
        )
        recv_mocker = mocker.patch.object(
            DualChannel, "recv",
            return_value = {
                "model_info":assist_conf["model_info"], "train_info": assist_conf["train_info"]
            }
        )
        rest = HorizontalGcnMolLabelTrainer(conf)
        rest_a = HorizontalGcnMolAssistTrainer(assist_conf)
        # Pre-serialized payloads replayed by the mocked recv():
        # early-stop flag and the assist model's parameters.
        esflag_recv = pickle.dumps(False) + EOV
        params_plain_recv = pickle.dumps(rest_a.model.state_dict()) + EOV
        params_send = fed_method._calc_upload_value(
            rest.model.state_dict(), len(rest.train_dataloader.dataset))
        params_collect = pickle.dumps(params_send)
        agg_otp = fed_assist_method._calc_aggregated_params(
            list(map(lambda x: pickle.loads(x), [params_collect, params_collect])))
        def mock_recv(*args, **kwargs):
            # Alternate: odd calls return the early-stop flag, even calls the params.
            if recv_mocker.call_count % 2 == 1:
                return esflag_recv
            else:
                return params_plain_recv
        def mock_agg(*args, **kwargs):
            return agg_otp
        recv_mocker = mocker.patch.object(
            DualChannel, "recv", side_effect=mock_recv
        )
        mocker.patch.object(
            AggregationOTPRoot, "aggregate", side_effect=mock_agg
        )
        mocker.patch.object(
            AggregationPlainRoot, "aggregate", side_effect=mock_agg
        )
        mocker.patch("service.fed_control._send_progress")
        rest.fit()
        rest_a.fit()
| 7,122 | 38.137363 | 110 | py |
XFL | XFL-master/test/algorithm/framework/horizontal/test_h_logistic_regression.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import shutil
from random import SystemRandom
import pickle
import numpy as np
import pandas as pd
import pytest
from service.fed_config import FedConfig
from service.fed_node import FedNode
from algorithm.framework.horizontal.logistic_regression.assist_trainer import HorizontalLogisticRegressionAssistTrainer
from algorithm.framework.horizontal.logistic_regression.label_trainer import HorizontalLogisticRegressionLabelTrainer
from algorithm.core.horizontal.aggregation.aggregation_otp import AggregationOTPRoot, AggregationOTPLeaf
from algorithm.core.horizontal.aggregation.aggregation_plain import AggregationPlainRoot, AggregationPlainLeaf
from common.communication.gRPC.python.channel import DualChannel
from common.communication.gRPC.python.commu import Commu
from common.crypto.key_agreement.contants import primes_hex
from gmpy2 import powmod
MOV = b"@" # middle of value
EOV = b"&" # end of value
def prepare_data(out_dir="/opt/dataset/unit_test"):
    """Generate a 1000-row synthetic binary-classification dataset.

    The first 800 rows are written to ``train_data.csv`` and the last 200
    rows to ``test_data.csv`` under *out_dir*.

    Args:
        out_dir: Target directory for the CSV files (must already exist).
            Defaults to the shared unit-test dataset directory.
    """
    case_df = pd.DataFrame({
        'x0': np.random.random(1000),
        'x1': [0] * 1000,  # constant (uninformative) feature
        'x2': 2 * np.random.random(1000) + 1.0,
        'x3': 3 * np.random.random(1000) - 1.0,
        'x4': np.random.random(1000)
    })
    # Label is 1 iff the three informative features sum past 2.5.
    case_df['y'] = np.where(case_df['x0'] + case_df['x2'] + case_df['x3'] > 2.5, 1, 0)
    case_df = case_df[['y', 'x0', 'x1', 'x2', 'x3', 'x4']]
    case_df.head(800).to_csv(os.path.join(out_dir, "train_data.csv"), index=True)
    case_df.tail(200).to_csv(os.path.join(out_dir, "test_data.csv"), index=True)
@pytest.fixture()
def get_assist_trainer_conf():
    """Yield the horizontal-LR assist_trainer config, redirected to the unit-test dirs."""
    conf_path = "python/algorithm/config/horizontal_logistic_regression/assist_trainer.json"
    with open(conf_path) as handle:
        assist_conf = json.load(handle)
    valset_entry = assist_conf["input"]["valset"][0]
    valset_entry["path"] = "/opt/dataset/unit_test"
    valset_entry["name"] = "test_data.csv"
    assist_conf["output"]["path"] = "/opt/checkpoints/unit_test"
    yield assist_conf
@pytest.fixture()
def get_trainer_conf():
    """Yield the horizontal-LR trainer config, redirected to the unit-test dirs."""
    conf_path = "python/algorithm/config/horizontal_logistic_regression/trainer.json"
    with open(conf_path) as handle:
        trainer_conf = json.load(handle)
    trainset_entry = trainer_conf["input"]["trainset"][0]
    trainset_entry["path"] = "/opt/dataset/unit_test"
    trainset_entry["name"] = "train_data.csv"
    trainer_conf["output"]["path"] = "/opt/checkpoints/unit_test"
    yield trainer_conf
@pytest.fixture(scope="module", autouse=True)
def env():
    """Create the unit-test data/checkpoint dirs and seed the dataset;
    remove both dirs again once the module's tests are done."""
    work_dirs = ("/opt/dataset/unit_test", "/opt/checkpoints/unit_test")
    for path in work_dirs:
        if not os.path.exists(path):
            os.makedirs(path)
    prepare_data()
    yield
    for path in work_dirs:
        if os.path.exists(path):
            shutil.rmtree(path)
class TestLogisticRegression:
    """Runs the horizontal logistic-regression label/assist trainers end to
    end in one process, with all gRPC channels and aggregation mocked out."""
    @pytest.mark.parametrize("encryption_method", ['plain','otp'])
    def test_trainer(self, get_trainer_conf, get_assist_trainer_conf, encryption_method, mocker):
        """Exercise fit() on both roles under 'plain' and 'otp' aggregation."""
        fed_method = None
        fed_assist_method = None
        # Impersonate a federation of two label trainers plus one assist
        # trainer, all inside this single test process.
        mocker.patch.object(Commu, "node_id", "assist_trainer")
        Commu.trainer_ids = ['node-1', 'node-2']
        Commu.scheduler_id = 'assist_trainer'
        conf = get_trainer_conf
        assist_conf = get_assist_trainer_conf
        assist_conf["model_info"]["config"]["input_dim"] = 5
        # if encryption_method == "otp":
        #     mocker.patch.object(DualChannel, "__init__", return_value=None)
        #     dc = DualChannel(name="otp_diffie_hellman", ids=['node-1', 'node-2'])
        #     # dc.remote_id = "node-1"
        mocker.patch.object(
            FedConfig, "get_label_trainer", return_value=['node-1', 'node-2']
        )
        mocker.patch.object(
            FedConfig, "get_assist_trainer", return_value='assist_trainer'
        )
        mocker.patch.object(FedConfig, "node_id", 'node-1')
        mocker.patch.object(FedNode, "node_id", "node-1")
        if encryption_method == "plain":
            assist_conf["train_info"]["train_params"]["encryption"] = {"plain": {}}
            sec_conf = assist_conf["train_info"]["train_params"]["encryption"]["plain"]
        else:
            sec_conf = assist_conf["train_info"]["train_params"]["encryption"]["otp"]
        if encryption_method == "plain":
            fed_method = AggregationPlainLeaf(sec_conf)
            fed_assist_method = AggregationPlainRoot(sec_conf)
        elif encryption_method == "otp":
            # OTP requires a Diffie-Hellman key agreement: fake the channel
            # and precompute one side's public value g^a mod p.
            mocker.patch.object(DualChannel, "__init__", return_value=None)
            # dc = DualChannel(name="otp_diffie_hellman", ids=['node-1', 'node-2'])
            DualChannel.remote_id = "node-2"
            supported_shortest_exponents = [225, 275, 325, 375, 400]
            shorest_exponent = supported_shortest_exponents[1]
            lower_bound = 1 << (supported_shortest_exponents[1] - 1)
            upper_bound = 1 << shorest_exponent
            primes = [int(p.replace(' ', ''), 16) for p in primes_hex]
            rand_num_generator = SystemRandom()
            a = rand_num_generator.randint(lower_bound, upper_bound)
            g_power_a = powmod(2, a, primes[1])
            mocker.patch.object(DualChannel, "swap", return_value=(1, g_power_a))
            mocker.patch.object(Commu, "node_id", "node-1")
            fed_method = AggregationOTPLeaf(sec_conf)
            fed_assist_method = AggregationOTPRoot(sec_conf)
        # Neutralise channel traffic; recv initially serves the config-sync
        # payload consumed while constructing the trainers below.
        mocker.patch.object(
            DualChannel, "__init__", return_value=None
        )
        mocker.patch.object(
            DualChannel, "send", return_value=None
        )
        mocker.patch.object(
            DualChannel, "recv",
            return_value = {
                "model_info":assist_conf["model_info"], "train_info": assist_conf["train_info"]
            }
        )
        lrt = HorizontalLogisticRegressionLabelTrainer(conf)
        lrt_a = HorizontalLogisticRegressionAssistTrainer(assist_conf)
        # Precompute the byte streams the mocked recv will serve during fit():
        # an early-stop flag, the assist trainer's plain parameters, and the
        # aggregation result of two identical uploads.
        esflag_recv = pickle.dumps(False) + EOV
        params_plain_recv = pickle.dumps(lrt_a.model.state_dict()) + EOV
        params_send = fed_method._calc_upload_value(lrt.model.state_dict(), len(lrt.train_dataloader.dataset))
        params_collect = pickle.dumps(params_send)
        agg_otp = fed_assist_method._calc_aggregated_params(list(map(lambda x: pickle.loads(x), [params_collect,params_collect])))
        def mock_recv(*args, **kwargs):
            # Odd calls deliver the early-stop flag, even calls the parameters.
            if recv_mocker.call_count % 2 == 1:
                return esflag_recv
            else:
                return params_plain_recv
        def mock_agg(*args, **kwargs):
            return agg_otp
        recv_mocker = mocker.patch.object(
            DualChannel, "recv", side_effect=mock_recv
        )
        mocker.patch.object(
            AggregationPlainRoot, "aggregate", side_effect=mock_agg
        )
        mocker.patch.object(
            AggregationOTPRoot, "aggregate", side_effect=mock_agg
        )
        mocker.patch("service.fed_control._send_progress")
        lrt.fit()
        lrt_a.fit()
| 7,640 | 40.302703 | 130 | py |
XFL | XFL-master/test/algorithm/framework/horizontal/test_h_kmeans.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import shutil
from random import SystemRandom
import pickle
import pandas as pd
import pytest
from gmpy2 import powmod
from sklearn.datasets import make_blobs
from service.fed_config import FedConfig
from service.fed_node import FedNode
from algorithm.core.horizontal.aggregation import aggregation_base
from algorithm.core.horizontal.aggregation.aggregation_otp import AggregationOTPRoot, AggregationOTPLeaf
from algorithm.core.horizontal.aggregation.aggregation_plain import AggregationPlainRoot, AggregationPlainLeaf
from algorithm.framework.horizontal.kmeans.assist_trainer import HorizontalKmeansAssistTrainer
from algorithm.framework.horizontal.kmeans.label_trainer import HorizontalKmeansLabelTrainer
from common.communication.gRPC.python.channel import DualChannel
from common.communication.gRPC.python.commu import Commu
from common.crypto.key_agreement.contants import primes_hex
MOV = b"@" # middle of value
EOV = b"&" # end of value
def prepare_data(out_dir="/opt/dataset/unit_test"):
    """Sample 450 two-feature points with ``make_blobs`` and split them
    400/50 into train/test CSV files under *out_dir*.

    Args:
        out_dir: Target directory for the CSV files (must already exist).
            Defaults to the shared unit-test dataset directory.
    """
    X, y = make_blobs(n_samples=450, n_features=2,
                      random_state=42, cluster_std=2.0)
    data_df = pd.DataFrame({'label': y, 'x1': X[:, 0], 'x2': X[:, 1]})
    data_df.head(400).to_csv(os.path.join(out_dir, "horizontal_kmeans_train.csv"))
    data_df.tail(50).to_csv(os.path.join(out_dir, "horizontal_kmeans_test.csv"))
@pytest.fixture()
def get_assist_trainer_conf():
    """Yield the horizontal-kmeans assist_trainer config shipped with the repo."""
    conf_file = "python/algorithm/config/horizontal_kmeans/assist_trainer.json"
    with open(conf_file) as handle:
        assist_conf = json.load(handle)
    yield assist_conf
@pytest.fixture()
def get_trainer_conf():
    """Yield the horizontal-kmeans trainer config shipped with the repo."""
    conf_file = "python/algorithm/config/horizontal_kmeans/trainer.json"
    with open(conf_file) as handle:
        trainer_conf = json.load(handle)
    yield trainer_conf
@pytest.fixture(scope="module", autouse=True)
def env():
    """Create the unit-test data/checkpoint dirs and seed the dataset;
    remove both dirs again once the module's tests are done."""
    work_dirs = ("/opt/dataset/unit_test", "/opt/checkpoints/unit_test")
    for path in work_dirs:
        if not os.path.exists(path):
            os.makedirs(path)
    prepare_data()
    yield
    for path in work_dirs:
        if os.path.exists(path):
            shutil.rmtree(path)
class TestHorizontalKMeans:
    """Runs the horizontal k-means label/assist trainers end to end in one
    process, with all gRPC channels and aggregation mocked out."""
    # ['otp', 'plain'] otp too slow
    @pytest.mark.parametrize("encryption_method", ['plain'])
    def test_trainer(self, get_trainer_conf, get_assist_trainer_conf, encryption_method, mocker):
        """Exercise fit() on both roles under plain aggregation."""
        # NOTE(review): presumably enlarges the aggregation message block
        # size so the model fits in one block — confirm against aggregation_base.
        mocker.patch.object(aggregation_base, "MAX_BLOCK_SIZE", 30000000)
        fed_method = None
        fed_assist_method = None
        # Impersonate a federation of two label trainers plus one assist
        # trainer, all inside this single test process.
        mocker.patch.object(Commu, "node_id", "assist_trainer")
        Commu.trainer_ids = ['node-1', 'node-2']
        Commu.scheduler_id = 'assist_trainer'
        conf = get_trainer_conf
        assist_conf = get_assist_trainer_conf
        mocker.patch.object(
            FedConfig, "get_label_trainer", return_value=['node-1', 'node-2']
        )
        mocker.patch.object(
            FedConfig, "get_assist_trainer", return_value='assist_trainer'
        )
        mocker.patch.object(FedNode, "node_id", "node-1")
        mocker.patch.object(FedConfig, "node_id", 'node-1')
        if encryption_method == "plain":
            assist_conf["train_info"]["train_params"]["encryption"] = {"plain": {}}
            sec_conf = assist_conf["train_info"]["train_params"]["encryption"]["plain"]
        else:
            sec_conf = assist_conf["train_info"]["train_params"]["encryption"]["otp"]
        if encryption_method == "plain":
            fed_method = AggregationPlainLeaf(sec_conf)
            fed_assist_method = AggregationPlainRoot(sec_conf)
        elif encryption_method == "otp":
            # OTP requires a Diffie-Hellman key agreement: fake the channel
            # and precompute one side's public value g^a mod p.
            mocker.patch.object(DualChannel, "__init__", return_value=None)
            # dc = DualChannel(name="otp_diffie_hellman", ids=['node-1', 'node-2'])
            DualChannel.remote_id = "node-2"
            supported_shortest_exponents = [225, 275, 325, 375, 400]
            shorest_exponent = supported_shortest_exponents[1]
            lower_bound = 1 << (supported_shortest_exponents[1] - 1)
            upper_bound = 1 << shorest_exponent
            primes = [int(p.replace(' ', ''), 16) for p in primes_hex]
            rand_num_generator = SystemRandom()
            a = rand_num_generator.randint(lower_bound, upper_bound)
            g_power_a = powmod(2, a, primes[1])
            mocker.patch.object(DualChannel, "swap", return_value=(1, g_power_a))
            fed_method = AggregationOTPLeaf(sec_conf)
            fed_assist_method = AggregationOTPRoot(sec_conf)
        # Neutralise channel traffic; recv initially serves the config-sync
        # payload consumed while constructing the trainers below.
        mocker.patch.object(
            DualChannel, "__init__", return_value=None
        )
        mocker.patch.object(
            DualChannel, "send", return_value=None
        )
        recv_mocker = mocker.patch.object(
            DualChannel, "recv",
            return_value = {
                "model_info":assist_conf["model_info"], "train_info": assist_conf["train_info"]
            }
        )
        kmeans = HorizontalKmeansLabelTrainer(conf)
        kmeans_a = HorizontalKmeansAssistTrainer(assist_conf)
        # Precompute the byte streams the mocked recv will serve during fit().
        esflag_recv = pickle.dumps(False) + EOV
        params_plain_recv = pickle.dumps(kmeans_a.model.state_dict()) + EOV
        params_send = fed_method._calc_upload_value(
            kmeans.model.state_dict(), len(kmeans.train_dataloader.dataset))
        params_collect = pickle.dumps(params_send)
        agg_otp = fed_assist_method._calc_aggregated_params(list(map(lambda x: pickle.loads(x), [params_collect,params_collect])))
        def mock_recv(*args, **kwargs):
            # Odd calls deliver the early-stop flag, even calls the parameters.
            if recv_mocker.call_count % 2 == 1:
                return esflag_recv
            else:
                return params_plain_recv
        def mock_agg(*args, **kwargs):
            return agg_otp
        recv_mocker = mocker.patch.object(
            DualChannel, "recv", side_effect=mock_recv
        )
        mocker.patch.object(
            AggregationOTPRoot, "aggregate", side_effect=mock_agg
        )
        mocker.patch.object(
            AggregationPlainRoot, "aggregate", side_effect=mock_agg
        )
        mocker.patch("service.fed_control._send_progress")
        kmeans.fit()
        kmeans_a.fit()
| 6,816 | 38.404624 | 130 | py |
XFL | XFL-master/test/algorithm/framework/horizontal/test_h_resnet_paddle.py | # # Copyright 2022 The XFL Authors. All rights reserved.
# #
# # Licensed under the Apache License, Version 2.0 (the "License");
# # you may not use this file except in compliance with the License.
# # You may obtain a copy of the License at
# #
# # http://www.apache.org/licenses/LICENSE-2.0
# #
# # Unless required by applicable law or agreed to in writing, software
# # distributed under the License is distributed on an "AS IS" BASIS,
# # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# # See the License for the specific language governing permissions and
# # limitations under the License.
# import json
# import os
# import shutil
# from random import SystemRandom
# import pickle
# import numpy as np
# import pytest
# from collections import OrderedDict
# import service.fed_config
# from algorithm.core.horizontal.aggregation.aggregation_otp import AggregationOTPRoot, AggregationOTPLeaf
# from algorithm.core.horizontal.aggregation.aggregation_plain import AggregationPlainRoot, AggregationPlainLeaf
# from common.communication.gRPC.python.channel import DualChannel
# from common.communication.gRPC.python.commu import Commu
# from common.crypto.key_agreement.contants import primes_hex
# from gmpy2 import powmod
# MOV = b"@" # middle of value
# EOV = b"&" # end of value
# def prepare_data():
# np.random.seed(0)
# data, label = np.random.randint(256, size=(
# 64, 32, 32, 3)), np.random.randint(10, size=64)
# test_data, test_labels = data[:32], label[:32]
# train_data, train_labels = data[32:64], label[32:64]
# np.savez("/opt/dataset/unit_test/test_data.npz",
# data=test_data, labels=test_labels)
# np.savez("/opt/dataset/unit_test/train_data.npz",
# data=train_data, labels=train_labels)
# @pytest.fixture()
# def get_assist_trainer_conf():
# with open("python/algorithm/config/horizontal_resnet_paddle/assist_trainer.json") as f:
# conf = json.load(f)
# conf["input"]["valset"][0]["path"] = "/opt/dataset/unit_test"
# conf["input"]["valset"][0]["name"] = "test_data.npz"
# conf["output"]["model"]["path"] = "/opt/checkpoints/unit_test"
# conf["output"]["metrics"]["path"] = "/opt/checkpoints/unit_test"
# conf["output"]["evaluation"]["path"] = "/opt/checkpoints/unit_test"
# conf["model_info"]["config"]["layers"] = "unit_test"
# conf["train_info"]["params"]["batch_size"] = 8
# conf["train_info"]["params"]["global_epoch"] = 2
# yield conf
# @pytest.fixture()
# def get_trainer_conf():
# with open("python/algorithm/config/horizontal_resnet_paddle/trainer.json") as f:
# conf = json.load(f)
# conf["input"]["trainset"][0]["path"] = "/opt/dataset/unit_test"
# conf["input"]["trainset"][0]["name"] = "train_data.npz"
# conf["output"]["metrics"]["path"] = "/opt/checkpoints/unit_test"
# conf["output"]["evaluation"]["path"] = "/opt/checkpoints/unit_test"
# conf["model_info"]["config"]["layers"] = "unit_test"
# conf["train_info"]["params"]["batch_size"] = 8
# conf["train_info"]["params"]["global_epoch"] = 2
# yield conf
# @pytest.fixture(scope="module", autouse=True)
# def env():
# if not os.path.exists("/opt/dataset/unit_test"):
# os.makedirs("/opt/dataset/unit_test")
# if not os.path.exists("/opt/checkpoints/unit_test"):
# os.makedirs("/opt/checkpoints/unit_test")
# prepare_data()
# yield
# if os.path.exists("/opt/dataset/unit_test"):
# shutil.rmtree("/opt/dataset/unit_test")
# if os.path.exists("/opt/checkpoints/unit_test"):
# shutil.rmtree("/opt/checkpoints/unit_test")
# class TestResnet:
# @pytest.mark.parametrize("encryption_method", ['plain']) # ['otp', 'plain'] otp too slow
# def test_trainer(self, get_trainer_conf, get_assist_trainer_conf, encryption_method, mocker):
# fed_method = None
# fed_assist_method = None
# mocker.patch.object(Commu, "node_id", "assist_trainer")
# Commu.trainer_ids = ['node-1', 'node-2']
# Commu.scheduler_id = 'assist_trainer'
# conf = get_trainer_conf
# assist_conf = get_assist_trainer_conf
# mocker.patch.object(
# service.fed_config.FedConfig, "get_label_trainer", return_value=['node-1', 'node-2']
# )
# mocker.patch.object(
# service.fed_config.FedConfig, "get_assist_trainer", return_value='assist_trainer'
# )
# mocker.patch.object(
# service.fed_config.FedConfig, "node_id", 'node-1'
# )
# # def mock_commu():
# # if commu_node_id.call_count == 1:
# # return "node-1"
# # elif commu_node_id.call_count == 2:
# # return "node-2"
# # commu_node_id = mocker.patch.object(Commu, "node_id", =mock_commu())
# if encryption_method == "plain":
# conf["train_info"]["params"]["aggregation_config"]["encryption"] = {
# "method": "plain"}
# assist_conf["train_info"]["params"]["aggregation_config"]["encryption"] = {
# "method": "plain"}
# sec_conf = conf["train_info"]["params"]["aggregation_config"]["encryption"]
# def mock_recv(*args, **kwargs):
# return params_plain_recv
# def mock_collect(*args, **kwargs):
# return params_collect
# def mock_agg(*args, **kwargs):
# return agg_otp
# def rebuild_state_dict(state_dict):
# new_state_dict = OrderedDict()
# for k, v in state_dict.items():
# new_state_dict[k] = v.numpy()
# return new_state_dict
# if encryption_method == "plain":
# fed_method = AggregationPlainLeaf(sec_conf)
# fed_assist_method = AggregationPlainRoot(sec_conf)
# elif encryption_method == "otp":
# mocker.patch.object(DualChannel, "__init__", return_value=None)
# # dc = DualChannel(name="otp_diffie_hellman", ids=['node-1', 'node-2'])
# DualChannel.remote_id = "node-2"
# supported_shortest_exponents = [225, 275, 325, 375, 400]
# shorest_exponent = supported_shortest_exponents[1]
# lower_bound = 1 << (supported_shortest_exponents[1] - 1)
# upper_bound = 1 << shorest_exponent
# primes = [int(p.replace(' ', ''), 16) for p in primes_hex]
# rand_num_generator = SystemRandom()
# a = rand_num_generator.randint(lower_bound, upper_bound)
# g_power_a = powmod(2, a, primes[1])
# mocker.patch.object(DualChannel, "swap", return_value=(1, g_power_a))
# fed_method = AggregationOTPLeaf(sec_conf)
# fed_assist_method = AggregationOTPRoot(sec_conf)
# service.fed_config.FedConfig.stage_config = conf
# from algorithm.framework.horizontal.resnet_paddle.assist_trainer import HorizontalResnetPaddleAssistTrainer
# from algorithm.framework.horizontal.resnet_paddle.label_trainer import HorizontalResnetPaddleLabelTrainer
# rest = HorizontalResnetPaddleLabelTrainer(conf)
# rest_a = HorizontalResnetPaddleAssistTrainer(assist_conf)
# params_plain_recv = pickle.dumps(rebuild_state_dict(rest_a.model.state_dict())) + EOV
# params_send = fed_method._calc_upload_value(
# rebuild_state_dict(rest.model.state_dict()), len(rest.train_dataloader.dataset))
# params_collect = pickle.dumps(params_send)
# agg_otp = fed_assist_method._calc_aggregated_params(list(map(lambda x: pickle.loads(x), [params_collect,params_collect])))
# def mock_recv(*args, **kwargs):
# if recv_mocker.call_count % 4 in [1,2]:
# return params_plain_recv
# elif recv_mocker.call_count % 4 in [0,3] :
# return params_collect
# recv_mocker = mocker.patch.object(
# DualChannel, "recv", side_effect=mock_recv
# )
# mocker.patch.object(
# DualChannel, "__init__", return_value=None
# )
# mocker.patch.object(
# DualChannel, "send", return_value=None
# )
# mocker.patch.object(
# AggregationOTPRoot, "aggregate", side_effect=mock_agg
# )
# mocker.patch.object(
# AggregationPlainRoot, "aggregate", side_effect=mock_agg
# )
# rest.fit()
# rest_a.fit()
| 8,572 | 42.29798 | 132 | py |
XFL | XFL-master/test/algorithm/framework/horizontal/test_h_resnet.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import shutil
from random import SystemRandom
import pickle
import numpy as np
import pytest
from gmpy2 import powmod
from service.fed_config import FedConfig
from service.fed_node import FedNode
from algorithm.core.horizontal.aggregation.aggregation_otp import AggregationOTPRoot, AggregationOTPLeaf
from algorithm.core.horizontal.aggregation.aggregation_plain import AggregationPlainRoot, AggregationPlainLeaf
from algorithm.framework.horizontal.resnet.assist_trainer import HorizontalResnetAssistTrainer
from algorithm.framework.horizontal.resnet.label_trainer import HorizontalResnetLabelTrainer
from common.communication.gRPC.python.channel import DualChannel
from common.communication.gRPC.python.commu import Commu
from common.crypto.key_agreement.contants import primes_hex
MOV = b"@" # middle of value
EOV = b"&" # end of value
def prepare_data(out_dir="/opt/dataset/unit_test"):
    """Create 64 deterministic random 32x32x3 'images' with 10-class labels
    and save the first/second halves as test/train ``.npz`` archives.

    Args:
        out_dir: Target directory for the ``.npz`` files (must already
            exist). Defaults to the shared unit-test dataset directory.
    """
    np.random.seed(0)  # deterministic fixture data
    data = np.random.randint(256, size=(64, 32, 32, 3))
    label = np.random.randint(10, size=64)
    np.savez(os.path.join(out_dir, "test_data.npz"),
             data=data[:32], labels=label[:32])
    np.savez(os.path.join(out_dir, "train_data.npz"),
             data=data[32:64], labels=label[32:64])
@pytest.fixture()
def get_assist_trainer_conf():
    """Yield the horizontal-resnet assist_trainer config shipped with the repo."""
    conf_file = "python/algorithm/config/horizontal_resnet/assist_trainer.json"
    with open(conf_file) as handle:
        assist_conf = json.load(handle)
    yield assist_conf
@pytest.fixture()
def get_trainer_conf():
    """Yield the horizontal-resnet trainer config shipped with the repo."""
    conf_file = "python/algorithm/config/horizontal_resnet/trainer.json"
    with open(conf_file) as handle:
        trainer_conf = json.load(handle)
    yield trainer_conf
@pytest.fixture(scope="module", autouse=True)
def env():
    """Create the unit-test data/checkpoint dirs and seed the dataset;
    remove both dirs again once the module's tests are done."""
    work_dirs = ("/opt/dataset/unit_test", "/opt/checkpoints/unit_test")
    for path in work_dirs:
        if not os.path.exists(path):
            os.makedirs(path)
    prepare_data()
    yield
    for path in work_dirs:
        if os.path.exists(path):
            shutil.rmtree(path)
class TestResnet:
    """Runs the horizontal ResNet label/assist trainers end to end in one
    process, with all gRPC channels and aggregation mocked out."""
    @pytest.mark.parametrize("encryption_method", ['plain']) # ['otp', 'plain'] otp too slow
    def test_trainer(self, get_trainer_conf, get_assist_trainer_conf, encryption_method, mocker):
        """Exercise fit() on both roles under plain aggregation."""
        fed_method = None
        fed_assist_method = None
        # Impersonate a federation of two label trainers plus one assist
        # trainer, all inside this single test process.
        mocker.patch.object(Commu, "node_id", "assist_trainer")
        Commu.trainer_ids = ['node-1', 'node-2']
        Commu.scheduler_id = 'assist_trainer'
        conf = get_trainer_conf
        assist_conf = get_assist_trainer_conf
        mocker.patch.object(
            FedConfig, "get_label_trainer", return_value=['node-1', 'node-2']
        )
        mocker.patch.object(
            FedConfig, "get_assist_trainer", return_value='assist_trainer'
        )
        mocker.patch.object(FedNode, "node_id", "node-1")
        mocker.patch.object(FedConfig, "node_id", 'node-1')
        if encryption_method == "plain":
            assist_conf["train_info"]["train_params"]["encryption"] = {"plain": {}}
            sec_conf = assist_conf["train_info"]["train_params"]["encryption"]["plain"]
        else:
            sec_conf = assist_conf["train_info"]["train_params"]["encryption"]["otp"]
        if encryption_method == "plain":
            fed_method = AggregationPlainLeaf(sec_conf)
            fed_assist_method = AggregationPlainRoot(sec_conf)
        elif encryption_method == "otp":
            # OTP requires a Diffie-Hellman key agreement: fake the channel
            # and precompute one side's public value g^a mod p.
            mocker.patch.object(DualChannel, "__init__", return_value=None)
            # dc = DualChannel(name="otp_diffie_hellman", ids=['node-1', 'node-2'])
            DualChannel.remote_id = "node-2"
            supported_shortest_exponents = [225, 275, 325, 375, 400]
            shorest_exponent = supported_shortest_exponents[1]
            lower_bound = 1 << (supported_shortest_exponents[1] - 1)
            upper_bound = 1 << shorest_exponent
            primes = [int(p.replace(' ', ''), 16) for p in primes_hex]
            rand_num_generator = SystemRandom()
            a = rand_num_generator.randint(lower_bound, upper_bound)
            g_power_a = powmod(2, a, primes[1])
            mocker.patch.object(DualChannel, "swap", return_value=(1, g_power_a))
            mocker.patch.object(Commu, "node_id", "node-1")
            fed_method = AggregationOTPLeaf(sec_conf)
            fed_assist_method = AggregationOTPRoot(sec_conf)
        # Neutralise channel traffic; recv initially serves the config-sync
        # payload consumed while constructing the trainers below.
        mocker.patch.object(
            DualChannel, "__init__", return_value=None
        )
        mocker.patch.object(
            DualChannel, "send", return_value=None
        )
        recv_mocker = mocker.patch.object(
            DualChannel, "recv",
            return_value = {
                "model_info":assist_conf["model_info"], "train_info": assist_conf["train_info"]
            }
        )
        rest = HorizontalResnetLabelTrainer(conf)
        rest_a = HorizontalResnetAssistTrainer(assist_conf)
        # Precompute the byte streams the mocked recv will serve during fit().
        esflag_recv = pickle.dumps(False) + EOV
        params_plain_recv = pickle.dumps(rest_a.model.state_dict()) + EOV
        params_send = fed_method._calc_upload_value(
            rest.model.state_dict(), len(rest.train_dataloader.dataset))
        params_collect = pickle.dumps(params_send)
        agg_otp = fed_assist_method._calc_aggregated_params(list(map(lambda x: pickle.loads(x), [params_collect,params_collect])))
        def mock_recv(*args, **kwargs):
            # Odd calls deliver the early-stop flag, even calls the parameters.
            if recv_mocker.call_count % 2 == 1:
                return esflag_recv
            else:
                return params_plain_recv
        def mock_agg(*args, **kwargs):
            return agg_otp
        recv_mocker = mocker.patch.object(
            DualChannel, "recv", side_effect=mock_recv
        )
        mocker.patch.object(
            AggregationOTPRoot, "aggregate", side_effect=mock_agg
        )
        mocker.patch.object(
            AggregationPlainRoot, "aggregate", side_effect=mock_agg
        )
        mocker.patch("service.fed_control._send_progress")
        rest.fit()
        rest_a.fit()
| 6,736 | 38.863905 | 130 | py |
XFL | XFL-master/test/algorithm/framework/horizontal/test_h_bert.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import shutil
from random import SystemRandom
import pickle
import pandas as pd
import pytest
from gmpy2 import powmod
from service.fed_config import FedConfig
from service.fed_node import FedNode
from algorithm.framework.horizontal.bert.assist_trainer import HorizontalBertAssistTrainer
from algorithm.framework.horizontal.bert.label_trainer import HorizontalBertLabelTrainer
from algorithm.core.horizontal.aggregation.aggregation_otp import AggregationOTPRoot, AggregationOTPLeaf
from algorithm.core.horizontal.aggregation.aggregation_plain import AggregationPlainRoot, AggregationPlainLeaf
from common.communication.gRPC.python.channel import DualChannel
from common.communication.gRPC.python.commu import Commu
from common.crypto.key_agreement.contants import primes_hex
MOV = b"@" # middle of value
EOV = b"&" # end of value
def prepare_data(out_dir="/opt/dataset/unit_test"):
    """Write a 10-row toy sentiment dataset: the first 6 rows go to
    ``train_data.tsv`` and the last 4 to ``test_data.tsv`` under *out_dir*.

    Args:
        out_dir: Target directory for the TSV files (must already exist).
            Defaults to the shared unit-test dataset directory.
    """
    case_df = pd.DataFrame({
        'sentence': ["the action is stilted","cold movie","smile on face","redundant concept","the greatest musicians",
                     "sometimes dry","shot on ugly digital video","funny yet","a beautifully","no apparent joy"],
        'label': [0,1,0,1,1,0,1,1,1,1]
    })
    case_df.head(6).to_csv(os.path.join(out_dir, "train_data.tsv"), sep='\t')
    case_df.tail(4).to_csv(os.path.join(out_dir, "test_data.tsv"), sep='\t')
@pytest.fixture()
def get_assist_trainer_conf():
    """Yield the horizontal-bert assist_trainer config shipped with the repo."""
    conf_file = "python/algorithm/config/horizontal_bert/assist_trainer.json"
    with open(conf_file) as handle:
        assist_conf = json.load(handle)
    yield assist_conf
@pytest.fixture()
def get_trainer_conf():
    """Yield the horizontal-bert trainer config shipped with the repo."""
    conf_file = "python/algorithm/config/horizontal_bert/trainer.json"
    with open(conf_file) as handle:
        trainer_conf = json.load(handle)
    yield trainer_conf
@pytest.fixture(scope="module", autouse=True)
def env():
    """Force CPU execution, create the unit-test dirs and seed the dataset;
    remove both dirs again once the module's tests are done."""
    os.environ["CUDA_VISIBLE_DEVICES"] = "-1"  # keep this test off the GPU
    work_dirs = ("/opt/dataset/unit_test", "/opt/checkpoints/unit_test")
    for path in work_dirs:
        if not os.path.exists(path):
            os.makedirs(path)
    prepare_data()
    yield
    for path in work_dirs:
        if os.path.exists(path):
            shutil.rmtree(path)
class TestBertTorch:
    """Runs the horizontal BERT label/assist trainers end to end in one
    process, with all gRPC channels and aggregation mocked out."""
    #@pytest.mark.skip(reason="no reason")
    @pytest.mark.parametrize("encryption_method", ['plain']) # ['otp', 'plain'] otp too slow
    def test_trainer(self, get_trainer_conf, get_assist_trainer_conf, encryption_method, mocker):
        """Exercise fit() on both roles under plain aggregation."""
        fed_method = None
        fed_assist_method = None
        # Impersonate a federation of two label trainers plus one assist
        # trainer, all inside this single test process.
        mocker.patch.object(Commu, "node_id", "assist_trainer")
        Commu.trainer_ids = ['node-1', 'node-2']
        Commu.scheduler_id = 'assist_trainer'
        conf = get_trainer_conf
        assist_conf = get_assist_trainer_conf
        mocker.patch.object(
            FedConfig, "get_label_trainer", return_value=['node-1', 'node-2']
        )
        mocker.patch.object(
            FedConfig, "get_assist_trainer", return_value='assist_trainer'
        )
        mocker.patch.object(FedNode, "node_id", "node-1")
        mocker.patch.object(FedConfig, "node_id", 'node-1')
        if encryption_method == "plain":
            assist_conf["train_info"]["train_params"]["encryption"] = {"plain": {}}
            sec_conf = assist_conf["train_info"]["train_params"]["encryption"]["plain"]
        else:
            sec_conf = assist_conf["train_info"]["train_params"]["encryption"]["otp"]
        if encryption_method == "plain":
            fed_method = AggregationPlainLeaf(sec_conf)
            fed_assist_method = AggregationPlainRoot(sec_conf)
        elif encryption_method == "otp":
            # OTP requires a Diffie-Hellman key agreement: fake the channel
            # and precompute one side's public value g^a mod p.
            mocker.patch.object(DualChannel, "__init__", return_value=None)
            # dc = DualChannel(name="otp_diffie_hellman", ids=['node-1', 'node-2'])
            DualChannel.remote_id = "node-2"
            supported_shortest_exponents = [225, 275, 325, 375, 400]
            shorest_exponent = supported_shortest_exponents[1]
            lower_bound = 1 << (supported_shortest_exponents[1] - 1)
            upper_bound = 1 << shorest_exponent
            primes = [int(p.replace(' ', ''), 16) for p in primes_hex]
            rand_num_generator = SystemRandom()
            a = rand_num_generator.randint(lower_bound, upper_bound)
            g_power_a = powmod(2, a, primes[1])
            mocker.patch.object(DualChannel, "swap", return_value=(1, g_power_a))
            Commu.node_id = "node-1"
            fed_method = AggregationOTPLeaf(sec_conf)
            fed_assist_method = AggregationOTPRoot(sec_conf)
        # Neutralise channel traffic; recv initially serves the config-sync
        # payload consumed while constructing the trainers below.
        mocker.patch.object(
            DualChannel, "__init__", return_value=None
        )
        mocker.patch.object(
            DualChannel, "send", return_value=None
        )
        mocker.patch.object(
            DualChannel, "recv",
            return_value = {
                "model_info":assist_conf["model_info"], "train_info": assist_conf["train_info"]
            }
        )
        bert = HorizontalBertLabelTrainer(conf)
        bert_a = HorizontalBertAssistTrainer(assist_conf)
        # Precompute the byte streams the mocked recv will serve during fit().
        esflag_recv = pickle.dumps(False) + EOV
        params_plain_recv = pickle.dumps(bert_a.model.state_dict()) + EOV
        params_send = fed_method._calc_upload_value(
            bert.model.state_dict(), len(bert.train_dataloader))
        params_collect = [pickle.dumps(params_send), pickle.dumps(params_send)]
        agg_otp = fed_assist_method._calc_aggregated_params(list(map(lambda x: pickle.loads(x), params_collect)))
        def mock_recv(*args, **kwargs):
            # Odd calls deliver the early-stop flag, even calls the parameters.
            if recv_mocker.call_count % 2 == 1:
                return esflag_recv
            else:
                return params_plain_recv
        def mock_agg(*args, **kwargs):
            return agg_otp
        recv_mocker = mocker.patch.object(
            DualChannel, "recv", side_effect=mock_recv
        )
        mocker.patch.object(
            AggregationOTPRoot, "aggregate", side_effect=mock_agg
        )
        mocker.patch.object(
            AggregationPlainRoot, "aggregate", side_effect=mock_agg
        )
        mocker.patch("service.fed_control._send_progress")
        bert.fit()
        bert_a.fit()
| 6,819 | 39.35503 | 119 | py |
XFL | XFL-master/test/algorithm/framework/horizontal/test_h_nbafl.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import shutil
import numpy as np
from scipy.stats import normaltest
import pickle
import pandas as pd
import torch
import pytest
from service.fed_config import FedConfig
from service.fed_node import FedNode
from algorithm.core.horizontal.aggregation.aggregation_plain import AggregationPlainRoot, AggregationPlainLeaf
from algorithm.framework.horizontal.nbafl.assist_trainer import HorizontalNbaflAssistTrainer
from algorithm.framework.horizontal.nbafl.label_trainer import HorizontalNbaflLabelTrainer
from common.communication.gRPC.python.channel import DualChannel
from common.utils.logger import logger
from common.utils.config_sync import ConfigSynchronizer
from common.communication.gRPC.python.commu import Commu
MOV = b"@"  # middle-of-value delimiter byte; presumably separates fields within a channel message — not exercised in this module
EOV = b"&"  # end-of-value delimiter byte; appended to every pickled payload fed to the mocked DualChannel.recv
def prepare_data():
    """Write a synthetic binary-classification dataset for the NbAFL tests.

    Produces 1000 rows with five features, derives a threshold label ``y``
    from three of them, then splits the frame into an 800-row train CSV and
    a 200-row test CSV under /opt/dataset/unit_test.
    """
    n_rows = 1000
    frame = pd.DataFrame({
        'x0': np.random.random(n_rows),
        'x1': [0] * n_rows,
        'x2': 2 * np.random.random(n_rows) + 1.0,
        'x3': 3 * np.random.random(n_rows) - 1.0,
        'x4': np.random.random(n_rows),
    })
    # Label depends on the three informative columns only.
    score = frame['x0'] + frame['x2'] + frame['x3']
    frame['y'] = np.where(score > 2.5, 1, 0)
    # Put the label first, then persist the train/test split.
    frame = frame[['y', 'x0', 'x1', 'x2', 'x3', 'x4']]
    frame.head(800).to_csv("/opt/dataset/unit_test/train_data.csv", index=True)
    frame.tail(200).to_csv("/opt/dataset/unit_test/test_data.csv", index=True)
@pytest.fixture()
def get_assist_trainer_conf():
    """Yield the stock NbAFL assist-trainer JSON configuration."""
    conf_path = "python/algorithm/config/horizontal_nbafl/assist_trainer.json"
    with open(conf_path) as fp:
        conf = json.load(fp)
    yield conf
@pytest.fixture()
def get_trainer_conf():
    """Yield the stock NbAFL (label) trainer JSON configuration."""
    conf_path = "python/algorithm/config/horizontal_nbafl/trainer.json"
    with open(conf_path) as fp:
        conf = json.load(fp)
    yield conf
@pytest.fixture(scope="module", autouse=True)
def env():
    """Module-wide setup/teardown: create the scratch directories and the
    synthetic dataset before any test runs, wipe them afterwards."""
    if not os.path.exists("/opt/dataset/unit_test"):
        os.makedirs("/opt/dataset/unit_test")
    if not os.path.exists("/opt/checkpoints/unit_test"):
        os.makedirs("/opt/checkpoints/unit_test")
    prepare_data()
    yield
    # Teardown: remove everything the tests wrote.
    if os.path.exists("/opt/dataset/unit_test"):
        shutil.rmtree("/opt/dataset/unit_test")
    if os.path.exists("/opt/checkpoints/unit_test"):
        shutil.rmtree("/opt/checkpoints/unit_test")
class TestNbafl:
    """Tests for the NbAFL horizontal trainers (DP noise before aggregation).

    Every gRPC channel and federation-topology lookup is patched with
    pytest-mock, so each test runs stand-alone in a single process.
    """
    def test_uplink_sigma(self, get_trainer_conf, get_assist_trainer_conf, mocker):
        """The label trainer should derive the expected uplink noise std."""
        conf = get_trainer_conf
        assist_conf = get_assist_trainer_conf
        # Fake a two-trainer federation coordinated by "assist_trainer".
        mocker.patch.object(Commu, "node_id", "assist_trainer")
        Commu.trainer_ids = ['node-1', 'node-2']
        Commu.scheduler_id = 'assist_trainer'
        mocker.patch.object(
            FedConfig, "get_label_trainer", return_value=['node-1', 'node-2']
        )
        mocker.patch.object(
            FedConfig, "get_assist_trainer", return_value='assist_trainer'
        )
        mocker.patch.object(FedConfig, "node_id", 'node-1')
        mocker.patch.object(FedNode, "node_id", "node-1")
        # Neutralize channel construction/sends; recv hands the trainer the
        # assist trainer's model/train config during __init__.
        mocker.patch.object(
            DualChannel, "__init__", return_value=None
        )
        mocker.patch.object(
            DualChannel, "send", return_value=None
        )
        recv_mocker = mocker.patch.object(
            DualChannel, "recv",
            return_value = {
                "model_info":assist_conf["model_info"], "train_info": assist_conf["train_info"]
            }
        )
        nbafl_t = HorizontalNbaflLabelTrainer(conf)
        logger.info(f"{len(nbafl_t.train_dataloader.dataset)} of data")
        nbafl_t._calc_uplink_sigma({})
        sigma_u = nbafl_t.sigma_u
        # Closed-form expectation; presumably the NbAFL uplink-sigma formula
        # with the fixture's defaults folded in — TODO confirm against
        # HorizontalNbaflLabelTrainer._calc_uplink_sigma.
        expected_sigma_u = np.sqrt(2 * np.log(12.5)) / 80
        logger.info(f"expected uplink sigma: {expected_sigma_u}")
        assert np.abs(sigma_u - expected_sigma_u) < 0.0001
    def test_uplink_add_noise(self, get_trainer_conf, get_assist_trainer_conf, mocker):
        """_add_noise should perturb weights with Gaussian noise of std sigma_u."""
        conf = get_trainer_conf
        assist_conf = get_assist_trainer_conf
        mocker.patch.object(Commu, "node_id", "assist_trainer")
        Commu.trainer_ids = ['node-1', 'node-2']
        Commu.scheduler_id = 'assist_trainer'
        mocker.patch.object(
            FedConfig, "get_label_trainer", return_value=['node-1', 'node-2']
        )
        mocker.patch.object(
            FedConfig, "get_assist_trainer", return_value='assist_trainer'
        )
        mocker.patch.object(FedConfig, "node_id", 'node-1')
        mocker.patch.object(FedNode, "node_id", "node-1")
        mocker.patch.object(
            DualChannel, "__init__", return_value=None
        )
        mocker.patch.object(
            DualChannel, "send", return_value=None
        )
        mocker.patch.object(
            DualChannel, "recv",
            return_value = {
                "model_info":assist_conf["model_info"], "train_info": assist_conf["train_info"]
            }
        )
        nbafl_t = HorizontalNbaflLabelTrainer(conf)
        nbafl_t.sigma_u = 0.1
        diff_list = []
        # Snapshot the clean parameters so per-call deltas can be measured.
        orig_params = [
            param.data.detach().clone() for param in nbafl_t.model.parameters()
        ]
        np.random.seed(42)
        torch.manual_seed(42)
        for _ in range(3):
            iter_diff_list = []
            nbafl_t._add_noise({})
            for orig_param, new_param in zip(orig_params, nbafl_t.model.parameters()):
                iter_diff_list.extend(torch.flatten(
                    orig_param - new_param.data.detach()
                ).numpy().tolist())
            diff_list.extend(iter_diff_list)
        # Pooled deltas should look Gaussian with std close to sigma_u.
        _, pval = normaltest(diff_list)
        logger.info("Normal test p-value: {}".format(pval))
        assert pval > 0.1
        diff_sigma = np.std(diff_list)
        logger.info("Diff std: {}".format(diff_sigma))
        assert np.abs(diff_sigma - nbafl_t.sigma_u) < 0.05
    def test_downlink_sigma(self, get_trainer_conf, get_assist_trainer_conf, mocker):
        """The assist trainer should derive the expected downlink noise std."""
        conf = get_trainer_conf
        assist_conf = get_assist_trainer_conf
        mocker.patch.object(Commu, "node_id", "assist_trainer")
        Commu.trainer_ids = ['node-1', 'node-2']
        Commu.scheduler_id = 'assist_trainer'
        mocker.patch.object(
            FedConfig, "get_label_trainer", return_value=['node-1', 'node-2']
        )
        mocker.patch.object(
            FedConfig, "get_assist_trainer", return_value='assist_trainer'
        )
        mocker.patch.object(FedConfig, "node_id", 'node-1')
        mocker.patch.object(FedNode, "node_id", "node-1")
        mocker.patch.object(
            DualChannel, "__init__", return_value=None
        )
        mocker.patch.object(
            DualChannel, "send", return_value=None
        )
        mocker.patch.object(
            DualChannel, "recv",
            return_value = {
                "model_info":assist_conf["model_info"], "train_info": assist_conf["train_info"]
            }
        )
        nbafl_at = HorizontalNbaflAssistTrainer(conf)
        nbafl_at.min_sample_num = 10
        # Expected value mirrors the downlink-sigma formula — TODO confirm
        # against HorizontalNbaflAssistTrainer._calc_downlink_sigma.
        expected_sigma_d = 10 * \
            np.sqrt(2 * np.log(12.5)) * np.sqrt((25-8) / 20)
        nbafl_at._calc_downlink_sigma({})
        assert (nbafl_at.sigma_d - expected_sigma_d) < 0.0001
    def test_label_trainer(self, get_trainer_conf, get_assist_trainer_conf, mocker):
        """Smoke test: fit() on both the label trainer and the assist trainer."""
        conf = get_trainer_conf
        assist_conf = get_assist_trainer_conf
        mocker.patch.object(Commu, "node_id", "assist_trainer")
        Commu.trainer_ids = ['node-1', 'node-2']
        Commu.scheduler_id = 'assist_trainer'
        mocker.patch.object(
            FedConfig, "get_label_trainer", return_value=['node-1', 'node-2']
        )
        mocker.patch.object(
            FedConfig, "get_assist_trainer", return_value='assist_trainer'
        )
        mocker.patch.object(FedConfig, "node_id", 'node-1')
        mocker.patch.object(FedNode, "node_id", "node-1")
        mocker.patch.object(
            DualChannel, "__init__", return_value=None
        )
        mocker.patch.object(
            DualChannel, "send", return_value=None
        )
        mocker.patch.object(
            DualChannel, "recv",
            return_value = {
                "model_info":assist_conf["model_info"], "train_info": assist_conf["train_info"]
            }
        )
        nbafl_t = HorizontalNbaflLabelTrainer(conf)
        nbafl_t.sigma_u = 0.1
        mocker.patch.object(
            ConfigSynchronizer, "__init__", return_value=None
        )
        mocker.patch.object(
            ConfigSynchronizer, "sync", return_value=assist_conf
        )
        mocker.patch("service.fed_control._send_progress")
        nbafl_at = HorizontalNbaflAssistTrainer(assist_conf)
        sec_conf = assist_conf["train_info"]["train_params"]["encryption"]["plain"]
        fed_method = AggregationPlainLeaf(sec_conf)
        fed_assist_method = AggregationPlainRoot(sec_conf)
        # Pre-build the byte payloads the mocked channel will hand back:
        # an "early stop = False" flag and the assist trainer's weights.
        esflag_recv = pickle.dumps(False) + EOV
        params_plain_recv = pickle.dumps(nbafl_at.model.state_dict()) + EOV
        params_send = fed_method._calc_upload_value(nbafl_t.model.state_dict(), len(nbafl_t.train_dataloader.dataset))
        params_collect = pickle.dumps(params_send)
        agg_otp = fed_assist_method._calc_aggregated_params(list(map(lambda x: pickle.loads(x), [params_collect,params_collect])))
        # Alternate between the early-stop flag and the weight payload based
        # on how many times recv has been called.
        def mock_recv(*args, **kwargs):
            if recv_mocker.call_count % 2 == 1:
                return esflag_recv
            else:
                return params_plain_recv
        def mock_agg(*args, **kwargs):
            return agg_otp
        recv_mocker = mocker.patch.object(
            DualChannel, "recv", side_effect=mock_recv
        )
        mocker.patch.object(
            AggregationPlainRoot, "aggregate", side_effect=mock_agg
        )
        nbafl_t.fit()
        nbafl_at.min_sample_num = 10
        mocker.patch.object(
            DualChannel, "recv", return_value=10
        )
        nbafl_at.fit()
| 10,433 | 37.360294 | 130 | py |
XFL | XFL-master/test/algorithm/framework/horizontal/test_h_densenet.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import shutil
from random import SystemRandom
import pickle
import numpy as np
import pytest
from gmpy2 import powmod
from service.fed_config import FedConfig
from service.fed_node import FedNode
from algorithm.framework.horizontal.densenet.assist_trainer import HorizontalDensenetAssistTrainer
from algorithm.framework.horizontal.densenet.label_trainer import HorizontalDensenetLabelTrainer
from algorithm.core.horizontal.aggregation.aggregation_otp import AggregationOTPRoot, AggregationOTPLeaf
from algorithm.core.horizontal.aggregation.aggregation_plain import AggregationPlainRoot, AggregationPlainLeaf
from common.communication.gRPC.python.channel import DualChannel
from common.communication.gRPC.python.commu import Commu
from common.crypto.key_agreement.contants import primes_hex
MOV = b"@"  # middle-of-value delimiter byte; presumably separates fields within a channel message — not exercised in this module
EOV = b"&"  # end-of-value delimiter byte; appended to every pickled payload fed to the mocked DualChannel.recv
def prepare_data():
    """Write deterministic fake 32x32 RGB train/test .npz files (seeded RNG)."""
    np.random.seed(0)
    # Keep the two randint calls in this exact order so the seeded values
    # match the original fixture data.
    images = np.random.randint(256, size=(64, 32, 32, 3))
    labels = np.random.randint(10, size=64)
    # First half of the rows forms the test split, second half the train split.
    np.savez("/opt/dataset/unit_test/test_data.npz",
             data=images[:32], labels=labels[:32])
    np.savez("/opt/dataset/unit_test/train_data.npz",
             data=images[32:64], labels=labels[32:64])
@pytest.fixture()
def get_assist_trainer_conf():
    """Yield the stock DenseNet assist-trainer JSON configuration."""
    with open("python/algorithm/config/horizontal_densenet/assist_trainer.json") as f:
        conf = json.load(f)
    yield conf
@pytest.fixture()
def get_trainer_conf():
    """Yield the stock DenseNet trainer JSON configuration."""
    with open("python/algorithm/config/horizontal_densenet/trainer.json") as f:
        conf = json.load(f)
    yield conf
@pytest.fixture(scope="module", autouse=True)
def env():
    """Module-wide setup/teardown: create the scratch directories and the
    synthetic .npz data before any test runs, wipe them afterwards."""
    if not os.path.exists("/opt/dataset/unit_test"):
        os.makedirs("/opt/dataset/unit_test")
    if not os.path.exists("/opt/checkpoints/unit_test"):
        os.makedirs("/opt/checkpoints/unit_test")
    prepare_data()
    yield
    # Teardown: remove everything the tests wrote.
    if os.path.exists("/opt/dataset/unit_test"):
        shutil.rmtree("/opt/dataset/unit_test")
    if os.path.exists("/opt/checkpoints/unit_test"):
        shutil.rmtree("/opt/checkpoints/unit_test")
class TestDensenet:
    """Smoke test for the horizontal DenseNet label/assist trainers with all
    channels and aggregation calls mocked out."""
    @pytest.mark.parametrize("encryption_method", ['plain'])  # ['otp', 'plain'] otp too slow
    def test_trainer(self, get_trainer_conf, get_assist_trainer_conf, encryption_method, mocker):
        """Run fit() on both sides for the parametrized encryption method."""
        fed_method = None
        fed_assist_method = None
        # Fake a two-trainer federation coordinated by "assist_trainer".
        mocker.patch.object(Commu, "node_id", "assist_trainer")
        Commu.trainer_ids = ['node-1', 'node-2']
        Commu.scheduler_id = 'assist_trainer'
        conf = get_trainer_conf
        assist_conf = get_assist_trainer_conf
        mocker.patch.object(
            FedConfig, "get_label_trainer", return_value=['node-1', 'node-2']
        )
        mocker.patch.object(
            FedConfig, "get_assist_trainer", return_value='assist_trainer'
        )
        mocker.patch.object(FedNode, "node_id", "node-1")
        mocker.patch.object(FedConfig, "node_id", 'node-1')
        # Select the security section matching the parametrized method.
        if encryption_method == "plain":
            assist_conf["train_info"]["train_params"]["encryption"] = {"plain": {}}
            sec_conf = assist_conf["train_info"]["train_params"]["encryption"]["plain"]
        else:
            sec_conf = assist_conf["train_info"]["train_params"]["encryption"]["otp"]
        if encryption_method == "plain":
            fed_method = AggregationPlainLeaf(sec_conf)
            fed_assist_method = AggregationPlainRoot(sec_conf)
        elif encryption_method == "otp":
            # Simulate one side of the Diffie-Hellman key agreement so the
            # OTP aggregator can be constructed without a real peer.
            mocker.patch.object(DualChannel, "__init__", return_value=None)
            # dc = DualChannel(name="otp_diffie_hellman", ids=['node-1', 'node-2'])
            DualChannel.remote_id = "node-2"
            supported_shortest_exponents = [225, 275, 325, 375, 400]
            shorest_exponent = supported_shortest_exponents[1]
            lower_bound = 1 << (supported_shortest_exponents[1] - 1)
            upper_bound = 1 << shorest_exponent
            primes = [int(p.replace(' ', ''), 16) for p in primes_hex]
            rand_num_generator = SystemRandom()
            a = rand_num_generator.randint(lower_bound, upper_bound)
            g_power_a = powmod(2, a, primes[1])
            mocker.patch.object(DualChannel, "swap", return_value=(1, g_power_a))
            Commu.node_id = "node-1"
            fed_method = AggregationOTPLeaf(sec_conf)
            fed_assist_method = AggregationOTPRoot(sec_conf)
        mocker.patch.object(
            DualChannel, "__init__", return_value=None
        )
        mocker.patch.object(
            DualChannel, "send", return_value=None
        )
        recv_mocker = mocker.patch.object(
            DualChannel, "recv",
            return_value = {
                "model_info":assist_conf["model_info"], "train_info": assist_conf["train_info"]
            }
        )
        rest = HorizontalDensenetLabelTrainer(conf)
        rest_a = HorizontalDensenetAssistTrainer(assist_conf)
        # Pre-build the payloads the mocked channel/aggregator will return:
        # early-stop flag, broadcast weights, and the aggregated update.
        esflag_recv = pickle.dumps(False) + EOV
        params_plain_recv = pickle.dumps(rest_a.model.state_dict()) + EOV
        params_send = fed_method._calc_upload_value(
            rest.model.state_dict(), len(rest.train_dataloader.dataset))
        params_collect = pickle.dumps(params_send)
        agg_otp = fed_assist_method._calc_aggregated_params(list(map(lambda x: pickle.loads(x), [params_collect, params_collect])))
        # Alternate between early-stop flag and weight payload by call count.
        def mock_recv(*args, **kwargs):
            if recv_mocker.call_count % 2 == 1:
                return esflag_recv
            else:
                return params_plain_recv
        def mock_agg(*args, **kwargs):
            return agg_otp
        recv_mocker = mocker.patch.object(
            DualChannel, "recv", side_effect=mock_recv
        )
        mocker.patch.object(
            AggregationOTPRoot, "aggregate", side_effect=mock_agg
        )
        mocker.patch.object(
            AggregationPlainRoot, "aggregate", side_effect=mock_agg
        )
        mocker.patch("service.fed_control._send_progress")
        rest.fit()
        rest_a.fit()
| 6,733 | 39.083333 | 131 | py |
XFL | XFL-master/test/algorithm/framework/transfer/test_transfer_logistic_regression.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import shutil
import numpy as np
import pandas as pd
import pytest
import torch
from service.fed_config import FedConfig
from service.fed_node import FedNode
from algorithm.framework.transfer.logistic_regression.label_trainer import \
TransferLogisticRegressionLabelTrainer
from algorithm.framework.transfer.logistic_regression.trainer import \
TransferLogisticRegressionTrainer
from common.communication.gRPC.python.channel import DualChannel
from common.communication.gRPC.python.commu import Commu
from common.utils.config_sync import ConfigSynchronizer
def prepare_data():
    """Write the transfer-LR fixture data.

    Builds 100 rows x 30 features (x00..x29; columns 0/10/20 are constant
    0/1/2, the rest uniform random), derives a threshold label, splits the
    columns between a labeled party and an unlabeled party, and saves the
    train/test CSVs plus the 40-row overlap index used for alignment.
    """
    constant_values = {0: 0, 10: 1, 20: 2}
    columns = {}
    for i in range(30):
        name = f"x{i:0>2d}"
        if i in constant_values:
            columns[name] = [constant_values[i]] * 100
        else:
            columns[name] = np.random.random(100)
    frame = pd.DataFrame(columns)
    frame["y"] = np.where(frame["x00"] + frame["x10"] + frame["x20"] + frame["x29"] > 3.5, 1, 0)
    # Labeled party holds y + x00..x14; the other party holds x15..x29.
    labeled_cols = ["y"] + [f"x{i:0>2d}" for i in range(15)]
    unlabeled_cols = [f"x{i:0>2d}" for i in range(15, 30)]
    frame[labeled_cols].head(60).to_csv(
        "/opt/dataset/unit_test/train_labeled.csv", index=True
    )
    frame[labeled_cols].tail(20).to_csv(
        "/opt/dataset/unit_test/test_labeled.csv", index=True
    )
    frame[unlabeled_cols].head(80).tail(60).to_csv(
        "/opt/dataset/unit_test/train_1.csv", index=True
    )
    frame[unlabeled_cols].tail(20).to_csv(
        "/opt/dataset/unit_test/test_1.csv", index=True
    )
    # Rows 20..59 are shared by both parties' train splits.
    overlap = np.linspace(20, 59, 40, dtype=np.int16)
    np.save("/opt/dataset/unit_test/overlap_index.npy", overlap)
@pytest.fixture()
def get_label_trainer_conf():
    """Yield the stock transfer-LR label-trainer JSON configuration."""
    with open("python/algorithm/config/transfer_logistic_regression/label_trainer.json") as f:
        conf = json.load(f)
    yield conf
@pytest.fixture()
def get_trainer_conf():
    """Yield the stock transfer-LR trainer JSON configuration."""
    with open("python/algorithm/config/transfer_logistic_regression/trainer.json") as f:
        conf = json.load(f)
    yield conf
@pytest.fixture(scope="module", autouse=True)
def env():
    """Module-wide setup/teardown: create the scratch directories and the
    synthetic dataset before any test runs, wipe them afterwards."""
    if not os.path.exists("/opt/dataset/unit_test"):
        os.makedirs("/opt/dataset/unit_test")
    if not os.path.exists("/opt/checkpoints/unit_test"):
        os.makedirs("/opt/checkpoints/unit_test")
    prepare_data()
    yield
    # Teardown: remove everything the tests wrote.
    if os.path.exists("/opt/dataset/unit_test"):
        shutil.rmtree("/opt/dataset/unit_test")
    if os.path.exists("/opt/checkpoints/unit_test"):
        shutil.rmtree("/opt/checkpoints/unit_test")
class TestTransferLogisticRegression:
    """End-to-end flow tests for the transfer logistic-regression trainers,
    with all cross-party traffic replaced by mocked tensors."""
    @pytest.mark.parametrize("encryption_method", ["plain"])
    def test_label_trainer(self, get_label_trainer_conf, get_trainer_conf, encryption_method, mocker):
        # End-to-end flow test of the label trainer.
        conf = get_label_trainer_conf
        Commu.node_id = "node-1"
        Commu.trainer_ids = ['node-1', 'node-2']
        Commu.scheduler_id = 'node-1'
        mocker.patch.object(
            FedConfig, "get_label_trainer", return_value=["node-1"]
        )
        mocker.patch.object(
            FedConfig, "get_trainer", return_value=["node-2"]
        )
        mocker.patch.object(FedConfig, "node_id", 'node-1')
        mocker.patch.object(FedNode, "node_id", "node-1")
        mocker.patch.object(DualChannel, "send", return_value=0)
        # During training recv yields per-epoch tensor triples; afterwards it
        # yields the peer's test-phase tensor.
        def mock_recv():
            if mock_channel_recv.call_count <= lrlt.global_epoch * lrlt.local_epoch:
                return (torch.rand(40, 5), torch.rand(40, 5, 5), torch.rand(40, 5))
            else:
                return torch.rand(20, 5)
        mock_channel_recv = mocker.patch.object(
            DualChannel, "recv", side_effect=mock_recv
        )
        mocker.patch.object(
            ConfigSynchronizer, "__init__", return_value=None
        )
        mocker.patch.object(
            ConfigSynchronizer, "sync", return_value=conf
        )
        lrlt = TransferLogisticRegressionLabelTrainer(conf)
        lrlt.fit()
        # load pretrained model
        lrlt.pretrain_model_path = "/opt/checkpoints/unit_test"
        lrlt.pretrain_model_name = "transfer_logitstic_regression_0.model"
        lrlt._set_model()
    @pytest.mark.parametrize("encryption_method", ["plain"])
    def test_trainer(self, get_trainer_conf, get_label_trainer_conf, encryption_method, mocker):
        # End-to-end flow test of the (unlabeled) trainer.
        conf = get_trainer_conf
        conf_l = get_label_trainer_conf
        Commu.trainer_ids = ['node-1', 'node-2']
        Commu.scheduler_id = 'node-1'
        mocker.patch.object(Commu, "node_id", "node-1")
        mocker.patch.object(
            FedConfig, "get_label_trainer", return_value=['node-1', 'node-2']
        )
        mocker.patch.object(
            FedConfig, "node_id", 'node-1'
        )
        mocker.patch.object(
            DualChannel, "__init__", return_value=None
        )
        mocker.patch.object(
            DualChannel, "send", return_value=None
        )
        # First recv hands back the label trainer's synchronized config.
        recv_mocker = mocker.patch.object(
            DualChannel, "recv",
            return_value = {
                "model_info":conf_l["model_info"], "train_info": conf_l["train_info"]
            }
        )
        lrt = TransferLogisticRegressionTrainer(conf)
        mocker.patch.object(
            DualChannel, "send", return_value=0
        )
        mocker.patch.object(
            DualChannel, "recv", return_value=(torch.rand(40, 5, 5), torch.rand(40, 5), torch.rand(40, 1))
        )
        lrt.fit()
        # load pretrained model
        lrt.pretrain_model_path = "/opt/checkpoints/unit_test"
        lrt.pretrain_model_name = "transfer_logitstic_regression_0.model"
        lrt._set_model()
| 6,386 | 36.133721 | 106 | py |
XFL | XFL-master/test/algorithm/framework/vertical/test_woe_iv.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import random
import shutil
import numpy as np
import pandas as pd
import pytest
from sklearn.preprocessing import LabelEncoder
from algorithm.framework.vertical.binning_woe_iv.label_trainer import \
VerticalBinningWoeIvLabelTrainer
from algorithm.framework.vertical.binning_woe_iv.trainer import \
VerticalBinningWoeIvTrainer
from common.communication.gRPC.python.channel import BroadcastChannel
from common.crypto.paillier.paillier import Paillier
from common.communication.gRPC.python.commu import Commu
def prepare_data():
    """Write the WOE/IV fixture CSVs.

    Builds a 1000-row float32 frame (x0 has a 999 sentinel tail, x1 is a
    half/half binary column, x2/x4 are constant, x3 is uniform), attaches a
    shuffled 70/30 binary label, and saves guest/host train/test splits plus
    the combined frame under /opt/dataset/unit_test.
    """
    features = {
        'x0': list(np.random.random(800)) + [999] * 200,
        'x1': [0] * 500 + [1] * 500,
        'x2': [999] * 1000,
        'x3': 3 * np.random.random(1000) - 1.0,
        'x4': [1] * 1000,
    }
    frame = pd.DataFrame(features).astype("float32")
    labels = [0] * 700 + [1] * 300
    random.shuffle(labels)
    frame['y'] = labels
    # Guest holds the label plus x0..x2; host holds x3/x4.
    guest = frame[['y', 'x0', 'x1', 'x2']].reset_index().rename(columns={'index': 'id'})
    host = frame[['x3', 'x4']].reset_index().rename(columns={'index': 'id'})
    guest.to_csv(
        "/opt/dataset/unit_test/breast_cancer_wisconsin_guest_train.csv", index=False
    )
    host.to_csv(
        "/opt/dataset/unit_test/breast_cancer_wisconsin_host_train.csv", index=False
    )
    # Test splits reuse the first 100 rows of each party's data.
    guest.iloc[:100].to_csv(
        "/opt/dataset/unit_test/breast_cancer_wisconsin_guest_test.csv", index=False
    )
    host.iloc[:100].to_csv(
        "/opt/dataset/unit_test/breast_cancer_wisconsin_host_test.csv", index=False
    )
    frame.reset_index().rename(columns={'index': 'id'}).to_csv(
        "/opt/dataset/unit_test/data.csv", index=False
    )
@pytest.fixture()
def get_label_trainer_conf():
    """Yield the WOE/IV label-trainer config redirected to the unit-test paths."""
    conf_path = "python/algorithm/config/vertical_binning_woe_iv/label_trainer.json"
    with open(conf_path) as fp:
        conf = json.load(fp)
    trainset = conf["input"]["trainset"][0]
    trainset["path"] = "/opt/dataset/unit_test"
    conf["output"]["path"] = "/opt/checkpoints/unit_test_1"
    trainset["name"] = "breast_cancer_wisconsin_guest_train.csv"
    yield conf
@pytest.fixture()
def get_trainer_conf():
    """Yield the WOE/IV trainer config redirected to the unit-test paths."""
    conf_path = "python/algorithm/config/vertical_binning_woe_iv/trainer.json"
    with open(conf_path) as fp:
        conf = json.load(fp)
    trainset = conf["input"]["trainset"][0]
    trainset["path"] = "/opt/dataset/unit_test"
    conf["output"]["path"] = "/opt/checkpoints/unit_test_1"
    trainset["name"] = "breast_cancer_wisconsin_host_train.csv"
    yield conf
@pytest.fixture(scope="module", autouse=True)
def env():
    """Module-wide setup/teardown: fake the federation identity, create the
    scratch directories and fixture data, and wipe everything afterwards."""
    Commu.node_id = "node-1"
    Commu.trainer_ids = ['node-1', 'node-2']
    Commu.scheduler_id = 'assist_trainer'
    if not os.path.exists("/opt/dataset/unit_test"):
        os.makedirs("/opt/dataset/unit_test")
    if not os.path.exists("/opt/checkpoints/unit_test"):
        os.makedirs("/opt/checkpoints/unit_test")
    prepare_data()
    yield
    # Teardown: remove everything the tests wrote, including the alternative
    # checkpoint directory used by the fixtures' output config.
    if os.path.exists("/opt/dataset/unit_test"):
        shutil.rmtree("/opt/dataset/unit_test")
    if os.path.exists("/opt/checkpoints/unit_test"):
        shutil.rmtree("/opt/checkpoints/unit_test")
    if os.path.exists("/opt/checkpoints/unit_test_1"):
        shutil.rmtree("/opt/checkpoints/unit_test_1")
def simu_data():
    """Reload the combined fixture dataset written by prepare_data()."""
    return pd.read_csv(
        "/opt/dataset/unit_test/data.csv", index_col='id'
    ).reset_index(drop=True)
class TestBinningWoeIv:
    """Tests for the vertical binning/WOE/IV trainers with the broadcast
    channel mocked to replay the peer party's messages."""
    @pytest.mark.parametrize("encryption, binning", [
        ("paillier", "equal_width"), ("plain", "equal_width"),
        ("plain", "equal_frequency")])
    def test_trainer(self, get_trainer_conf, encryption, binning, mocker):
        """Run the (host) trainer's fit() for each encryption/binning combo."""
        case_df = simu_data()
        train_conf = get_trainer_conf
        with open("python/algorithm/config/vertical_binning_woe_iv/label_trainer.json") as f:
            label_trainer_conf = json.load(f)
        # First recv: the label trainer's synchronized train config.
        def mock_config_recv(*args, **kwargs):
            tmp = {"train_info": label_trainer_conf["train_info"]}
            return tmp
        mocker.patch.object(
            BroadcastChannel, "__init__", return_value=None
        )
        mock_channel_recv = mocker.patch.object(
            BroadcastChannel, "recv", side_effect=mock_config_recv
        )
        bwi = VerticalBinningWoeIvTrainer(train_conf)
        # NOTE(review): train_conf is mutated after the trainer was already
        # constructed — confirm the trainer reads the binning method lazily,
        # otherwise this assignment has no effect.
        if binning == "equal_frequency":
            train_conf["train_info"]["train_params"]['binning']['method'] = "equal_frequency"
        if encryption == "plain":
            bwi.train_params["encryption"] = {
                "plain": {}
            }
        encryption_config = bwi.train_params["encryption"]
        if encryption == "paillier":
            pri_context = Paillier.context(encryption_config["paillier"]["key_bit_size"],
                                           djn_on=encryption_config["paillier"]["djn_on"])
        elif encryption == "plain":
            pass
        # Replays the label party: first the public key, then the (possibly
        # encrypted) label column, alternating by recv call count.
        def mock_recv(*args, **kwargs):
            if encryption == "paillier":
                if mock_channel_recv.call_count <= 1:
                    return pri_context.to_public().serialize()
                elif mock_channel_recv.call_count % 2 == 0:
                    num_cores = -1 if encryption_config["paillier"]["parallelize_on"] else 1
                    label = case_df[["y"]].to_numpy().flatten().astype(np.int32)
                    en_label = Paillier.encrypt(pri_context,
                                                label,
                                                precision=encryption_config["paillier"]["precision"],
                                                obfuscation=True,
                                                num_cores=num_cores)
                    return Paillier.serialize(en_label)
            elif encryption == "plain":
                return case_df[["y"]]
        mock_channel_recv = mocker.patch.object(
            BroadcastChannel, "recv", side_effect=mock_recv
        )
        mocker.patch.object(
            BroadcastChannel, "send", return_value=0
        )
        bwi.fit()
    @pytest.mark.parametrize("encryption, binning", [
        ("paillier", "equal_width"), ("plain", "equal_width"), ("plain", "equal_frequency")])
    def test_label_trainer(self, get_label_trainer_conf, encryption, binning, mocker):
        """Run the label trainer's fit() and check the persisted WOE/IV result."""
        label_train_conf = get_label_trainer_conf
        if binning == "equal_frequency":
            label_train_conf["train_info"]["train_params"]["binning"]["method"] = "equal_frequency"
        mocker.patch("service.fed_control._send_progress")
        mocker.patch.object(
            BroadcastChannel, "__init__", return_value=None
        )
        mocker.patch.object(
            BroadcastChannel, "broadcast", return_value=0
        )
        bwi = VerticalBinningWoeIvLabelTrainer(label_train_conf)
        bwi.broadcast_channel.remote_ids = ["node-2"]
        if encryption == "plain":
            bwi.train_params["encryption"] = {
                "plain": {}
            }
        encryption_config = bwi.train_params["encryption"]
        if encryption == "paillier":
            pri_context = Paillier.context(encryption_config["paillier"]["key_bit_size"],
                                           djn_on=encryption_config["paillier"]["djn_on"])
            pub_context = Paillier.context_from(pri_context.to_public().serialize())
        elif encryption == "plain":
            pass
        # Simulates the host party's reply: bins the host features the same
        # way the trainer would and aggregates label count/sum per bin.
        def mock_collect(*args, **kwargs):
            case_df = simu_data()
            y = case_df[["y"]]
            case_df = case_df[['x3', 'x4']]
            bin_num = bwi.train_params["binning"]["bins"]
            labels = [i for i in range(bin_num)]
            columns_name = case_df.columns
            if bwi.train_params["binning"]["method"] == "equal_width":
                case_df = pd.Series(case_df.columns).apply(
                    lambda x: pd.cut(case_df[x], bin_num, retbins=True, labels=labels)[0]).T
            else:
                case_df = pd.Series(case_df.columns).apply(
                    lambda x: pd.qcut(case_df[x], bin_num, retbins=True, duplicates='drop')[0]).T
            for i in case_df.columns:
                case_df[i] = LabelEncoder().fit_transform(case_df[i])
            case_df.columns = columns_name
            if encryption == "paillier":
                # num_cores = -1 if encryption_config["parallelize_on"] else 1
                # label = y.to_numpy().flatten().astype(np.int32)
                # en_label = Paillier.encrypt(pri_context,
                #                             label,
                #                             precision=encryption_config["precision"],
                #                             obfuscation=True,
                #                             num_cores=num_cores)
                # encrypt_id_label_pair = Paillier.serialize(en_label)
                # en_label = Paillier.ciphertext_from(pub_context, encrypt_id_label_pair)
                # encrypt_id_label_pair = pd.DataFrame(en_label).rename(columns={0: 'y'})
                # tmp = []
                #
                # for feat in case_df.columns:
                #     feature_df = encrypt_id_label_pair.join(case_df[feat])
                #     tmp.append(feature_df.groupby([feat])['y'].agg({'count', 'sum'}))
                # bins_count = dict(zip(case_df.columns, [i['count'] for i in tmp]))
                # woe_feedback_list = dict(zip(case_df.columns, [i['sum'] for i in tmp]))
                # for _, feature in woe_feedback_list.items():
                #     woe_feedback_list[_] = feature.apply(lambda x: x.serialize())
                return [{"woe_feedback_list": {}, "bins_count": {}}]
            elif encryption == "plain":
                encrypt_id_label_pair = pd.DataFrame(y)
                tmp = []
                for feat in case_df.columns:
                    feature_df = encrypt_id_label_pair.join(case_df[feat])
                    tmp.append(feature_df.groupby([feat])['y'].agg({'count', 'sum'}))
                bins_count = dict(zip(case_df.columns, [i['count'] for i in tmp]))
                woe_feedback_list = dict(zip(case_df.columns, [i['sum'] for i in tmp]))
                return [{"woe_feedback_list": woe_feedback_list, "bins_count": bins_count}]
        mocker.patch.object(
            BroadcastChannel, "collect", side_effect=mock_collect
        )
        bwi.fit()
        # Verify the result file was persisted with all expected sections.
        assert os.path.exists("/opt/checkpoints/unit_test_1/woe_iv_result_[STAGE_ID].json")
        with open("/opt/checkpoints/unit_test_1/woe_iv_result_[STAGE_ID].json", "r",
                  encoding='utf-8') as f:
            conf = json.loads(f.read())
        for k in ["woe", "iv", "count_neg", "count_pos", "ratio_pos", "ratio_neg"]:
            assert k in conf
| 11,497 | 43.393822 | 102 | py |
XFL | XFL-master/test/algorithm/framework/vertical/test_pearson.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import shutil
import pickle
import numpy as np
import pandas as pd
import pytest
import service.fed_config
from algorithm.framework.vertical.pearson.label_trainer import VerticalPearsonLabelTrainer
from algorithm.framework.vertical.pearson.trainer import VerticalPearsonTrainer
from common.communication.gRPC.python.channel import BroadcastChannel, DualChannel
from common.crypto.paillier.paillier import Paillier
from algorithm.core.paillier_acceleration import embed
from common.communication.gRPC.python.commu import Commu
def prepare_data():
    """Write the Pearson-correlation fixture CSVs.

    Builds 1000 rows with a shuffled 50/50 label and five features where x1
    is nearly collinear with x0 (x0 + 0.01), then splits the columns between
    the guest (y, x0, x2) and host (x1, x3, x4) CSVs.
    """
    labels = [0] * 500 + [1] * 500
    np.random.shuffle(labels)
    base = np.random.random(1000)
    frame = pd.DataFrame({
        "y": labels,
        "x0": base,
        "x1": base + 0.01,
        "x2": np.random.random(1000),
        "x3": np.random.random(1000),
        "x4": np.random.random(1000),
    })
    guest_cols = ["y", "x0", "x2"]
    host_cols = ["x1", "x3", "x4"]
    frame[guest_cols].to_csv(
        "/opt/dataset/unit_test/train_guest.csv", index=True, index_label='id'
    )
    frame[host_cols].to_csv(
        "/opt/dataset/unit_test/train_host.csv", index=True, index_label='id'
    )
@pytest.fixture(scope="module", autouse=True)
def env():
    """Module-wide setup/teardown: fake the federation identity, create the
    scratch directories and fixture data, and wipe everything afterwards."""
    Commu.node_id = "node-1"
    Commu.trainer_ids = ['node-1', 'node-2']
    Commu.scheduler_id = 'assist_trainer'
    if not os.path.exists("/opt/dataset/unit_test"):
        os.makedirs("/opt/dataset/unit_test")
    if not os.path.exists("/opt/checkpoints/unit_test"):
        os.makedirs("/opt/checkpoints/unit_test")
    prepare_data()
    yield
    # Teardown: remove everything the tests wrote.
    if os.path.exists("/opt/dataset/unit_test"):
        shutil.rmtree("/opt/dataset/unit_test")
    if os.path.exists("/opt/checkpoints/unit_test"):
        shutil.rmtree("/opt/checkpoints/unit_test")
@pytest.fixture()
def get_label_trainer_conf():
    """Yield the Pearson label-trainer config redirected to the unit-test paths."""
    with open("python/algorithm/config/vertical_pearson/label_trainer.json") as f:
        conf = json.load(f)
        conf["input"]["trainset"][0]["path"] = "/opt/dataset/unit_test"
        conf["input"]["trainset"][0]["name"] = "train_guest.csv"
        conf["output"]["path"] = "/opt/checkpoints/unit_test"
    yield conf
@pytest.fixture()
def get_trainer_conf():
    """Yield the Pearson trainer config redirected to the unit-test paths."""
    with open("python/algorithm/config/vertical_pearson/trainer.json") as f:
        conf = json.load(f)
        conf["input"]["trainset"][0]["path"] = "/opt/dataset/unit_test"
        conf["input"]["trainset"][0]["name"] = "train_host.csv"
        conf["output"]["path"] = "/opt/checkpoints/unit_test"
    yield conf
class TestVerticalPearsonTrainer:
    """Tests for the vertical Pearson trainer and label-trainer roles.

    All gRPC channels are mocked; the remote side of every exchange is
    simulated locally with plaintext or Paillier-encrypted values.
    """

    @pytest.mark.parametrize("node_id, encryption", [
        ("node-2", "plain"), ("node-3", "plain"),
        ("node-2", "paillier"), ("node-3", "paillier")
    ])
    def test_trainer_3party(self, get_trainer_conf, mocker, node_id, encryption):
        """Run a non-label trainer in a three-party setup and check the saved model."""
        with open("python/algorithm/config/vertical_pearson/label_trainer.json") as f:
            label_trainer_conf = json.load(f)
        if encryption == "plain":
            label_trainer_conf["train_info"]["train_params"]["encryption"] = {"plain": {}}
        mocker.patch.object(
            BroadcastChannel, "__init__", return_value=None
        )
        sample_size = 200
        conf = get_trainer_conf
        mocker.patch.object(
            DualChannel, "__init__", return_value=None
        )
        mocker.patch.object(
            DualChannel, "send", return_value=0
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_label_trainer", return_value=["node-1"]
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_trainer", return_value=["node-2", "node-3"]
        )
        # NOTE(review): the two patches below duplicate the two above verbatim;
        # harmless (the later patch wins) but they could be removed.
        mocker.patch.object(
            service.fed_config.FedConfig, "get_label_trainer", return_value=["node-1"]
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_trainer", return_value=["node-2", "node-3"]
        )

        def mock_context(use_pickle=False):
            # First recv delivers the synced train_info; subsequent recvs deliver
            # the encryption context (None for plain, a serialized public Paillier
            # context for paillier).
            if mock_recv.call_count <= 1:
                return {"train_info": label_trainer_conf["train_info"]}
            else:
                encryption_params = label_trainer_conf.get("train_info").get("train_params").get(
                    "encryption"
                )
                encryption = list(encryption_params.keys())[0]
                if encryption == "plain":
                    return None
                elif encryption == "paillier":
                    public_context = Paillier.context(
                        encryption_params[encryption].get("key_bit_size"),
                        encryption_params[encryption].get("djn_on")
                    ).to_public().serialize()
                    return public_context
        mock_recv = mocker.patch.object(
            BroadcastChannel, "recv", side_effect=mock_context
        )
        mocker.patch.object(
            BroadcastChannel, "broadcast", return_value=0
        )
        vpt = VerticalPearsonTrainer(conf)
        conf["train_info"]["train_params"]["sample_size"] = sample_size
        if encryption == "plain":
            conf["train_info"]["train_params"]["encryption"] = {"plain": {}}
        vpt.node_id = node_id
        # Re-create the trainer-to-trainer channels with the mocked DualChannel.
        vpt.channels["trainer_com"]["node-2"] = DualChannel(
            name="trainer_com_node-2_node-3",
            ids=["node-3", "node-2"]
        )
        vpt.channels["trainer_com"]["node-3"] = DualChannel(
            name="trainer_com_node-3_node-2",
            ids=["node-2", "node-3"]
        )
        df = pd.read_csv("/opt/dataset/unit_test/train_guest.csv", index_col=0)

        def mock_result_feature1():
            # First call signals "no more features" (False); later calls return a
            # standardized feature column, Paillier-encrypted when required.
            if flag_mocker1.call_count == 1:
                return False
            df['x0'] = (df['x0'] - df['x0'].mean()) / df['x0'].std()
            if vpt.encryption == "plain":
                other = df['x0'].to_numpy()
                return other, True
            elif vpt.encryption == "paillier":
                pc = Paillier.context_from(vpt.public_context.serialize())
                other = Paillier.encrypt(
                    context=pc,
                    data=df['x0'].to_numpy(),
                    obfuscation=True,
                    num_cores=1
                )
                return other, True
        flag_mocker1 = mocker.patch.object(
            vpt.channels["trainer_feature_com"], "recv", side_effect=mock_result_feature1
        )

        def mock_result_feature2():
            # Same shape as feature1 but with an extra middle element (feature id).
            if flag_mocker2.call_count == 1:
                return False
            df['x0'] = (df['x0'] - df['x0'].mean()) / df['x0'].std()
            if vpt.encryption == "plain":
                other = df['x0'].to_numpy()
                return other, 1, True
            elif vpt.encryption == "paillier":
                pc = Paillier.context_from(vpt.public_context.serialize())
                other = Paillier.encrypt(
                    context=pc,
                    data=df['x0'].to_numpy(),
                    obfuscation=True,
                    num_cores=1
                )
                return other, 1, True
        flag_mocker2 = mocker.patch.object(
            vpt.channels["trainer_com"]["node-2"], "recv", side_effect=mock_result_feature2
        )

        def mock_result_feature3():
            if flag_mocker3.call_count == 1:
                return False
            df['x0'] = (df['x0'] - df['x0'].mean()) / df['x0'].std()
            if vpt.encryption == "plain":
                other = df['x0'].to_numpy()
                return other, 1, True
            elif vpt.encryption == "paillier":
                pc = Paillier.context_from(vpt.public_context.serialize())
                other = Paillier.encrypt(
                    context=pc,
                    data=df['x0'].to_numpy(),
                    obfuscation=True,
                    num_cores=1
                )
                return other, 1, True
        flag_mocker3 = mocker.patch.object(
            vpt.channels["trainer_com"]["node-3"], "recv", side_effect=mock_result_feature3
        )
        mocker.patch.object(
            vpt.channels["sample_idx_com"], "recv", return_value=list(range(200))
        )
        vpt.fit()
        with open("/opt/checkpoints/unit_test/vertical_pearson_[STAGE_ID].pkl", 'rb') as f:
            model = pickle.load(f)
        assert "corr" in model
        assert len(model["features"]) == 3
        assert model["feature_source"] == [node_id, node_id, node_id]
        assert "feature_mapping" in model

    def test_label_trainer(self, get_label_trainer_conf, mocker):
        """Fit the label trainer against a mocked peer and verify the stored correlations."""
        mocker.patch("service.fed_control._send_progress")
        mocker.patch.object(
            DualChannel, "__init__", return_value=None
        )
        mocker.patch.object(
            DualChannel, "send", return_value=0
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_label_trainer", return_value=["node-1"]
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_trainer", return_value=["node-2"]
        )
        mocker.patch.object(
            BroadcastChannel, "broadcast", return_value=0
        )
        vpt = VerticalPearsonLabelTrainer(get_label_trainer_conf)
        vpt.node_id = "node-1"
        # Standardize both parties' features to compute reference correlations.
        df1 = pd.read_csv("/opt/dataset/unit_test/train_guest.csv", index_col=0)
        del df1['y']
        df1 = (df1 - df1.mean()) / df1.std()
        df2 = pd.read_csv("/opt/dataset/unit_test/train_host.csv", index_col=0)
        df2 = (df2 - df2.mean()) / df2.std()

        def mock_result_feature():
            # Simulate the trainer side: encrypt the local features with the label
            # trainer's own key, then compute the fixed-point cross product.
            e = embed(df1.to_numpy().T)
            c = Paillier.encrypt(vpt.private_context, e)
            local_mat = np.array(df2.to_numpy() * 10 ** vpt.encryption_param.precision, dtype=int)
            corr = np.dot(local_mat.T, c).T
            return corr

        def mock_result_corr():
            # Trainer-local correlation block, keyed by the (node, node) pair.
            corr = np.dot(df2.T, df2)
            corr /= len(df2)
            summary = {("node-2", "node-2"): corr}
            return summary
        mocker.patch.object(
            vpt.channels["trainer_feature_com"]["node-2"], "recv", side_effect=mock_result_feature
        )
        mocker.patch.object(
            vpt.channels["trainer_corr_com"]["node-2"], "recv", side_effect=mock_result_corr
        )
        vpt.fit()
        with open("/opt/checkpoints/unit_test/vertical_pearson_[STAGE_ID].pkl", 'rb') as f:
            model = pickle.load(f)
        assert "corr" in model
        assert len(model["features"]) == 5
        assert model["feature_source"] == ['node-1', 'node-1', 'node-2', 'node-2', 'node-2']
        # Cross-party entries must match the plaintext dot products.
        np.testing.assert_almost_equal(model["corr"][0][2], np.dot(df1["x0"], df2["x1"]) / len(df1), decimal=3)
        np.testing.assert_almost_equal(model["corr"][0][3], np.dot(df1["x0"], df2["x3"]) / len(df1), decimal=3)
        np.testing.assert_almost_equal(model["corr"][0][4], np.dot(df1["x0"], df2["x4"]) / len(df1), decimal=3)
        np.testing.assert_almost_equal(model["corr"][1][2], np.dot(df1["x2"], df2["x1"]) / len(df1), decimal=3)
        np.testing.assert_almost_equal(model["corr"][1][3], np.dot(df1["x2"], df2["x3"]) / len(df1), decimal=3)
        np.testing.assert_almost_equal(model["corr"][1][4], np.dot(df1["x2"], df2["x4"]) / len(df1), decimal=3)

    def test_label_trainer_with_selection(self, get_label_trainer_conf, mocker):
        """Same as test_label_trainer but with an explicit column selection in the config."""
        conf = get_label_trainer_conf
        conf["train_info"]["train_params"]["col_index"] = [2]
        conf["train_info"]["train_params"]["col_names"] = "x2"
        mocker.patch("service.fed_control._send_progress")
        mocker.patch.object(
            DualChannel, "__init__", return_value=None
        )
        mocker.patch.object(
            DualChannel, "send", return_value=0
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_label_trainer", return_value=["node-1"]
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_trainer", return_value=["node-2"]
        )
        mocker.patch.object(
            BroadcastChannel, "broadcast", return_value=0
        )
        vpt = VerticalPearsonLabelTrainer(conf)
        vpt.node_id = "node-1"
        df1 = pd.read_csv("/opt/dataset/unit_test/train_guest.csv", index_col=0)
        del df1['y']
        df1 = (df1 - df1.mean()) / df1.std()
        df2 = pd.read_csv("/opt/dataset/unit_test/train_host.csv", index_col=0)
        df2 = (df2 - df2.mean()) / df2.std()

        def mock_result_feature():
            e = embed(df1.to_numpy().T)
            c = Paillier.encrypt(vpt.private_context, e)
            local_mat = np.array(df2.to_numpy() * 10 ** vpt.encryption_param.precision, dtype=int)
            corr = np.dot(local_mat.T, c).T
            return corr

        def mock_result_corr():
            corr = np.dot(df2.T, df2)
            corr /= len(df2)
            summary = {("node-2", "node-2"): corr}
            return summary
        mocker.patch.object(
            vpt.channels["trainer_feature_com"]["node-2"], "recv", side_effect=mock_result_feature
        )
        mocker.patch.object(
            vpt.channels["trainer_corr_com"]["node-2"], "recv", side_effect=mock_result_corr
        )
        vpt.fit()
        with open("/opt/checkpoints/unit_test/vertical_pearson_[STAGE_ID].pkl", 'rb') as f:
            model = pickle.load(f)
        assert "corr" in model
        assert len(model["features"]) == 5
        assert model["feature_source"] == ['node-1', 'node-1', 'node-2', 'node-2', 'node-2']
        np.testing.assert_almost_equal(model["corr"][0][2], np.dot(df1["x0"], df2["x1"]) / len(df1), decimal=3)
        np.testing.assert_almost_equal(model["corr"][0][3], np.dot(df1["x0"], df2["x3"]) / len(df1), decimal=3)
        np.testing.assert_almost_equal(model["corr"][0][4], np.dot(df1["x0"], df2["x4"]) / len(df1), decimal=3)
        np.testing.assert_almost_equal(model["corr"][1][2], np.dot(df1["x2"], df2["x1"]) / len(df1), decimal=3)
        np.testing.assert_almost_equal(model["corr"][1][3], np.dot(df1["x2"], df2["x3"]) / len(df1), decimal=3)
        np.testing.assert_almost_equal(model["corr"][1][4], np.dot(df1["x2"], df2["x4"]) / len(df1), decimal=3)

    def test_label_trainer_fast(self, get_label_trainer_conf, mocker):
        """Plain-encryption fast path with sub-sampling; looser tolerance (decimal=2)."""
        sample_size = 200
        conf = get_label_trainer_conf
        conf["train_info"]["train_params"]["sample_size"] = sample_size
        conf["train_info"]["train_params"]["encryption"] = {"plain": {}}
        mocker.patch("service.fed_control._send_progress")
        mocker.patch.object(
            DualChannel, "__init__", return_value=None
        )
        mocker.patch.object(
            DualChannel, "send", return_value=0
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_label_trainer", return_value=["node-1"]
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_trainer", return_value=["node-2"]
        )
        mocker.patch.object(
            BroadcastChannel, "broadcast", return_value=0
        )
        vpt = VerticalPearsonLabelTrainer(conf)
        vpt.node_id = "node-1"
        df1 = pd.read_csv("/opt/dataset/unit_test/train_guest.csv", index_col=0)
        del df1['y']
        df1 = (df1 - df1.mean()) / df1.std()
        df2 = pd.read_csv("/opt/dataset/unit_test/train_host.csv", index_col=0)
        df2 = (df2 - df2.mean()) / df2.std()

        def mock_result_feature():
            # First call answers for feature x0, later calls for x2.
            if mock_result.call_count > 1:
                f = 'x2'
            else:
                f = 'x0'
            local_mat = df2.to_numpy()
            corr = np.dot(local_mat.T, df1[f])
            return corr

        def mock_result_corr():
            corr = np.dot(df2.T, df2)
            corr /= len(df2)
            summary = {("node-2", "node-2"): corr}
            return summary
        mock_result = mocker.patch.object(
            vpt.channels["trainer_feature_com"]["node-2"], "recv", side_effect=mock_result_feature
        )
        mocker.patch.object(
            vpt.channels["trainer_corr_com"]["node-2"], "recv", side_effect=mock_result_corr
        )
        vpt.fit()
        with open("/opt/checkpoints/unit_test/vertical_pearson_[STAGE_ID].pkl", 'rb') as f:
            model = pickle.load(f)
        assert "corr" in model
        assert len(model["features"]) == 5
        assert model["feature_source"] == ['node-1', 'node-1', 'node-2', 'node-2', 'node-2']
        np.testing.assert_almost_equal(model["corr"][0][2], np.dot(df1["x0"], df2["x1"]) / sample_size, decimal=2)
        np.testing.assert_almost_equal(model["corr"][0][3], np.dot(df1["x0"], df2["x3"]) / sample_size, decimal=2)
        np.testing.assert_almost_equal(model["corr"][0][4], np.dot(df1["x0"], df2["x4"]) / sample_size, decimal=2)
        np.testing.assert_almost_equal(model["corr"][1][2], np.dot(df1["x2"], df2["x1"]) / sample_size, decimal=2)
        np.testing.assert_almost_equal(model["corr"][1][3], np.dot(df1["x2"], df2["x3"]) / sample_size, decimal=2)
        np.testing.assert_almost_equal(model["corr"][1][4], np.dot(df1["x2"], df2["x4"]) / sample_size, decimal=2)

    @pytest.mark.parametrize("encryption", ["plain", "paillier"])
    def test_trainer(self, get_trainer_conf, mocker, encryption):
        """Run a single non-label trainer against a mocked label trainer."""
        with open("python/algorithm/config/vertical_pearson/label_trainer.json") as f:
            label_trainer_conf = json.load(f)
        if encryption == "plain":
            label_trainer_conf["train_info"]["train_params"]["encryption"] = {"plain": {}}
        mocker.patch.object(
            BroadcastChannel, "__init__", return_value=None
        )
        mocker.patch.object(
            DualChannel, "__init__", return_value=None
        )
        mocker.patch.object(
            DualChannel, "send", return_value=0
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_label_trainer", return_value=["node-1"]
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_trainer", return_value=["node-2"]
        )
        # NOTE(review): the two patches below duplicate the two above verbatim.
        mocker.patch.object(
            service.fed_config.FedConfig, "get_label_trainer", return_value=["node-1"]
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_trainer", return_value=["node-2"]
        )

        def mock_context(use_pickle=False):
            # First recv delivers the synced train_info; later recvs deliver the
            # encryption context (None for plain, serialized public key for paillier).
            if mock_recv.call_count <= 1:
                return {"train_info": label_trainer_conf["train_info"]}
            else:
                encryption_params = label_trainer_conf.get("train_info").get("train_params").get(
                    "encryption"
                )
                encryption = list(encryption_params.keys())[0]
                if encryption == "plain":
                    return None
                elif encryption == "paillier":
                    public_context = Paillier.context(
                        encryption_params[encryption].get("key_bit_size"),
                        encryption_params[encryption].get("djn_on")
                    ).to_public().serialize()
                    return public_context
        mock_recv = mocker.patch.object(
            BroadcastChannel, "recv", side_effect=mock_context
        )
        mocker.patch.object(
            BroadcastChannel, "broadcast", return_value=0
        )
        vpt = VerticalPearsonTrainer(get_trainer_conf)
        vpt.node_id = "node-2"
        df = pd.read_csv("/opt/dataset/unit_test/train_guest.csv", index_col=0)

        def mock_result_feature():
            # NOTE(review): the plain branch returns a 3-tuple (data, name, flag)
            # while the paillier branch returns a 2-tuple (data, flag) — presumably
            # mirroring the two protocol variants; confirm against the trainer code.
            df['x0'] = (df['x0'] - df['x0'].mean()) / df['x0'].std()
            flag = True
            if vpt.encryption == "plain":
                other = df['x0'].to_numpy()
                return other, 'x0', flag
            elif vpt.encryption == "paillier":
                pc = Paillier.context_from(vpt.public_context.serialize())
                other = Paillier.encrypt(
                    context=pc,
                    data=df['x0'].to_numpy(),
                    obfuscation=True,
                    num_cores=1
                )
                return other, flag
        mocker.patch.object(
            vpt.channels["trainer_feature_com"], "recv", side_effect=mock_result_feature
        )
        vpt.fit()
        with open("/opt/checkpoints/unit_test/vertical_pearson_[STAGE_ID].pkl", 'rb') as f:
            model = pickle.load(f)
        assert "corr" in model
        assert len(model["features"]) == 3
        assert model["feature_source"] == ['node-2', 'node-2', 'node-2']
        assert "feature_mapping" in model
| 21,221 | 40.368421 | 118 | py |
XFL | XFL-master/test/algorithm/framework/vertical/test_kmeans.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import json
import os
import pickle
import shutil
import numpy as np
import pandas as pd
import pytest
import torch
from sklearn.metrics import davies_bouldin_score
import service.fed_config
from algorithm.framework.vertical.kmeans.assist_trainer import \
VerticalKmeansAssistTrainer
from algorithm.framework.vertical.kmeans.table_agg_base import (
TableAggregatorAbstractAssistTrainer, TableAggregatorAbstractTrainer)
from algorithm.framework.vertical.kmeans.table_agg_otp import (
TableAggregatorOTPAssistTrainer, TableAggregatorOTPTrainer)
from algorithm.framework.vertical.kmeans.table_agg_plain import (
TableAggregatorPlainAssistTrainer, TableAggregatorPlainTrainer)
from algorithm.framework.vertical.kmeans.trainer import VerticalKmeansTrainer
from algorithm.framework.vertical.kmeans.label_trainer import VerticalKmeansLabelTrainer
from common.communication.gRPC.python.channel import (
BroadcastChannel, DualChannel)
from common.communication.gRPC.python.commu import Commu
from common.crypto.key_agreement.diffie_hellman import DiffieHellman
def prepare_data():
    """Write a synthetic 5-cluster dataset as guest/host CSV splits for the k-means tests."""
    labels = [0, 1, 2, 3, 4] * 200
    np.random.shuffle(labels)
    # x0 separates the five clusters; x1 is uniform noise shared by all of them.
    frame = pd.DataFrame({
        "y": labels,
        "x0": np.random.random(1000) * 0.2 + np.array(labels),
        "x1": np.random.random(1000),
    })
    frame[['y', 'x0', 'x1']].to_csv(
        "/opt/dataset/unit_test/train_guest.csv", index=True, index_label='id')
    frame[['x0', 'x1']].to_csv(
        "/opt/dataset/unit_test/train_host.csv", index=True, index_label='id')
    frame[['x0', 'x1']].to_csv(
        "/opt/dataset/unit_test/train_guest_without_id.csv", index=False)
    frame[['x0', 'x1']].to_csv(
        "/opt/dataset/unit_test/train_host_without_id.csv", index=False)
# Shared train_info payload returned by the mocked config-sync recv: OTP
# (one-time-pad) masking with a 3072-bit Diffie-Hellman key exchange,
# k=5 clusters, at most 50 iterations.
mock_config = {
    "train_info": {
        "train_params": {
            "encryption": {
                "otp": {
                    "key_bitlength": 128,
                    "data_type": "torch.Tensor",
                    "key_exchange": {
                        "key_bitlength": 3072,
                        "optimized": True
                    },
                    "csprng": {
                        "name": "hmac_drbg",
                        "method": "sha512"
                    }
                }
            },
            "k": 5,
            "max_iter": 50,
            "tol": 1e-6,
            "random_seed": 50
        }
    }
}
@pytest.fixture(scope="module", autouse=True)
def env():
    """Set up communication identities and the test data dirs; tear down afterwards."""
    Commu.node_id = "node-1"
    Commu.trainer_ids = ['node-1', 'node-2']
    Commu.scheduler_id = 'assist_trainer'
    for directory in ("/opt/dataset/unit_test", "/opt/checkpoints/unit_test"):
        os.makedirs(directory, exist_ok=True)
    prepare_data()
    yield
    for directory in ("/opt/dataset/unit_test", "/opt/checkpoints/unit_test"):
        if os.path.exists(directory):
            shutil.rmtree(directory)
@pytest.fixture()
def get_label_trainer_conf():
    """K-means label-trainer config rewired to the unit-test dataset and output dirs."""
    with open("python/algorithm/config/vertical_kmeans/label_trainer.json") as f:
        conf = json.load(f)
    trainset = conf["input"]["trainset"][0]
    trainset["path"] = "/opt/dataset/unit_test"
    trainset["name"] = "train_guest.csv"
    conf["output"]["path"] = "/opt/checkpoints/unit_test"
    yield conf
@pytest.fixture()
def get_trainer_conf():
    """K-means trainer config rewired to the unit-test dataset and output dirs."""
    with open("python/algorithm/config/vertical_kmeans/trainer.json") as f:
        conf = json.load(f)
    trainset = conf["input"]["trainset"][0]
    trainset["path"] = "/opt/dataset/unit_test"
    trainset["name"] = "train_host.csv"
    conf["output"]["path"] = "/opt/checkpoints/unit_test"
    yield conf
@pytest.fixture()
def get_scheduler_conf():
    """Assist-trainer (scheduler) config loaded from the repo defaults."""
    with open("python/algorithm/config/vertical_kmeans/assist_trainer.json") as fp:
        scheduler_conf = json.load(fp)
    yield scheduler_conf
class TestVerticalKmeansTrainer:
    """Tests for vertical k-means trainer, label trainer, assist trainer and table aggregators.

    All channels and key exchanges are mocked; scheduler responses are simulated
    from the trainer's own local state so each role can be exercised in isolation.
    """

    def test_init_method(self, mocker, get_label_trainer_conf, get_trainer_conf, get_scheduler_conf):
        """Exercise kmeans++ center initialization for all three roles."""
        mocker.patch.object(
            DualChannel, "__init__", return_value=None
        )
        mocker.patch.object(
            TableAggregatorOTPTrainer, "__init__", return_value=None
        )
        mocker.patch.object(
            TableAggregatorOTPTrainer, "send", return_value=None
        )
        conf = copy.deepcopy(get_label_trainer_conf)

        def mock_func(*args, **kwargs):
            # First recv returns the synced config; later recvs return None.
            if mock_dual_recv.call_count == 1:
                return mock_config
        mock_dual_recv = mocker.patch.object(
            DualChannel, "recv", side_effect=mock_func
        )
        mocker.patch.object(
            DualChannel, "send", return_value=None
        )
        label_trainer = VerticalKmeansLabelTrainer(conf)
        mocker.patch.object(
            DualChannel, "recv", return_value=np.array([1.0] * 1000)
        )
        label_trainer.init = "kmeans++"
        label_trainer.init_centers()
        conf = copy.deepcopy(get_scheduler_conf)
        conf["train_info"] = mock_config["train_info"]
        assist_trainer = VerticalKmeansAssistTrainer(conf)
        mocker.patch.object(
            TableAggregatorOTPAssistTrainer, "aggregate", return_value=torch.Tensor(list(range(100))).reshape(20, 5)
        )
        assist_trainer.init = "kmeans++"
        assist_trainer.init_centers()
        conf = copy.deepcopy(get_trainer_conf)
        trainer = VerticalKmeansTrainer(conf)
        mocker.patch.object(
            DualChannel, "recv", return_value=[1, 2, 3, 4, 5]
        )
        trainer.init = "kmeans++"
        trainer.init_centers()

    def test_table_agg_otp(self, mocker):
        """OTP-masked table aggregation with a pre-computed DH shared secret."""
        pd_table = pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
        table = torch.tensor(pd_table.values)

        def mock_diffiehellman():
            # Fixed byte string standing in for the DH exchange result, so no
            # real key agreement protocol has to run.
            ret = b"\xf0F\xc6\x1dJ(\xb0\x19\xc3j6$bw\xcb\xad\xe1\xdd?\x1c\xd728\xa9\x0eD\xf4\x95\xd4)*," \
                  b"@Sd\x897\xb4N7GG\x17\x01\xa6#-$]w3\xc2x\x97\x045\xb4\xd8c\xa9\xa4\x9f\xdb\x1a?\xd0\x80\xd7=\x02" \
                  b"\x07\xb0A\xeaQ\x17\x89W:\x1a\x85.\xea\x19O\x8b\xe8\x83\x04\xf4\xb4\\S~\xff1\x8cT\xeb\x99x9;\xb9" \
                  b"\x90\x00\x00\x96\x91A\x1d\xe8\xa0l6\xf1\xc1P\xf4\x14\xf2\xd5\xceg}\xc04e/l3^o\xd4\xe0\tC7\xd7\xaa" \
                  b"&\xfa4\x1378`\xb9\xd5\t\x0ez\xe3\x80\xde\r;\x8dI\x80\\\xea\xdf\xce\xe3a\xd2\xe3\x88\nm`\xce7" \
                  b"\xf14CUe\xac]\x93\xc5\x86\xed\x19K{" \
                  b"x\x93\x98\xdd\xb2\x1aS\xb5q\x071\xb0\x0b'x\x16\xfcE\xccw\x11U@\x9aB\xa7\x1a\xbb\x80\xd3tn@\xc6\x1a" \
                  b"\xc31Y\xe4\xe0\x07\x83\xca\xecW\xa0\x08\x12\x93\xc3g\xad\xadF\x8c\xcd\x105\xe6\x07\x0f\xc9\xa1\xe9" \
                  b"\xee\xf9M\x16\xf8b\xb5]x\x0b3\x11\xafn\xa2w\xb4]1\x9f\xb3\xa5\xba/\xd9R\xa8*\xddi\x83\x1bg\xde\xf2" \
                  b"\xcd\xc7\xb7 m\xb28`\xe5UH;\x1b\xc8Mq\xa8\x03\xa78x\x01\xb3\x95\x81r.\x07\\]\xc1\x1d\xa5\xff\x99" \
                  b"\x8b\xd0\xab\\\\<\x03\x1co\x08+\x964*\t\x80v\xd6m2:es\x0f\xa2\x1at\x0b-\x8aN\xa3\x0bu\xa9XoN\xcd" \
                  b"\xd3{\x10\x8dO\x7f\xba\x99\n\x99jHqL\xa7aV\r\xf7\x1d\xde\xe8\x18 "
            return ret
        mocker.patch.object(
            BroadcastChannel, "send", return_value=0
        )
        mocker.patch.object(
            BroadcastChannel, "collect", return_value=[pd_table, pd_table]
        )
        encryption = {
            "otp": {
                "key_bitlength": 128,
                "data_type": "torch.Tensor",
                "key_exchange": {
                    "key_bitlength": 3072,
                    "optimized": True
                },
                "csprng": {
                    "name": "hmac_drbg",
                    "method": "sha512"
                }
            }
        }
        mocker.patch.object(
            DiffieHellman, "exchange", return_value=mock_diffiehellman()
        )
        table_trainer = TableAggregatorOTPTrainer(
            sec_conf=encryption["otp"], trainer_ids=['node-1', 'node-2'])
        table_trainer.send(table)
        # Sending None is a no-op by contract.
        assert table_trainer.send(None) is None
        mocker.patch.object(
            BroadcastChannel, "collect", return_value=[pd_table.to_numpy(), pd_table.to_numpy()]
        )
        table_scheduler = TableAggregatorOTPAssistTrainer(
            sec_conf=encryption["otp"], trainer_ids=['node-1', 'node-2'])
        table_scheduler.aggregate()

    @pytest.mark.parametrize("computing_engine", ["local", "spark"])
    def test_label_trainer(self, get_label_trainer_conf, computing_engine, mocker):
        """End-to-end fit of the label trainer; expects convergence and a saved model."""
        conf = get_label_trainer_conf
        conf["computing_engine"] = computing_engine
        # Mock the functions needed during init so no real channels are built.
        mocker.patch.object(
            DualChannel, "__init__", return_value=None
        )
        mocker.patch.object(
            DualChannel, "send", return_value=0
        )

        def mock_func(*args, **kwargs):
            if mock_dual_recv.call_count == 1:
                return mock_config
        mock_dual_recv = mocker.patch.object(
            DualChannel, "recv", side_effect=mock_func
        )
        mocker.patch.object(
            TableAggregatorOTPTrainer, "__init__", return_value=None
        )
        mocker.patch.object(
            TableAggregatorOTPTrainer, "send"
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_label_trainer", return_value=["trainer-1"]
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_trainer", return_value=["trainer-2"]
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_assist_trainer", return_value="scheduler"
        )
        # Instantiate the trainer under test.
        vkt = VerticalKmeansLabelTrainer(conf)

        # Mock the scheduler-side responses using the trainer's own local state.
        def mock_get_cluster():
            return VerticalKmeansAssistTrainer.get_cluster(vkt.dist_table)

        def mock_converged_flag():
            return bool(vkt.local_tol < vkt.tol)
        mocker.patch.object(
            vkt.channels.get("init_center", DualChannel), "recv",
            return_value=np.random.choice(1000, vkt.k, replace=False)
        )
        mocker.patch.object(
            vkt.channels["cluster_result"], "recv", side_effect=mock_get_cluster
        )
        mocker.patch.object(
            vkt.channels["converged_flag"], "recv", side_effect=mock_converged_flag
        )
        vkt.fit()
        # The run should converge and persist a model file.
        assert vkt.is_converged
        assert os.path.exists(
            "/opt/checkpoints/unit_test/vertical_kmeans_[STAGE_ID].model")
        with open("/opt/checkpoints/unit_test/vertical_kmeans_[STAGE_ID].model", "rb") as f:
            model = json.load(f)
        assert model["k"] == vkt.k
        assert model["iter"] <= vkt.max_iter
        assert model["is_converged"]
        assert model["tol"] == vkt.tol
        assert len(model["cluster_centers"]) == vkt.k

    def test_label_trainer_only_features(self, get_label_trainer_conf, mocker):
        """Label-trainer fit when the trainset has neither an id column nor a label."""
        conf = get_label_trainer_conf
        conf["input"]["trainset"][0]["has_id"] = False
        conf["input"]["trainset"][0]["has_label"] = False
        # Mock the functions needed during init so no real channels are built.
        mocker.patch.object(
            DualChannel, "__init__", return_value=None
        )
        mocker.patch.object(
            DualChannel, "send", return_value=0
        )

        def mock_func(*args, **kwargs):
            if mock_dual_recv.call_count == 1:
                return mock_config
        mock_dual_recv = mocker.patch.object(
            DualChannel, "recv", side_effect=mock_func
        )
        mocker.patch.object(
            TableAggregatorOTPTrainer, "__init__", return_value=None
        )
        mocker.patch.object(
            TableAggregatorOTPTrainer, "send"
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_label_trainer", return_value=["trainer-1"]
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_trainer", return_value=["trainer-2"]
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_assist_trainer", return_value="scheduler"
        )
        # Instantiate the trainer under test.
        vkt = VerticalKmeansLabelTrainer(conf)

        # Mock the scheduler-side responses using the trainer's own local state.
        def mock_get_cluster():
            return VerticalKmeansAssistTrainer.get_cluster(vkt.dist_table)

        def mock_converged_flag():
            return bool(vkt.local_tol < vkt.tol)
        mocker.patch.object(
            vkt.channels.get("init_center", DualChannel), "recv", return_value=np.random.choice(1000, vkt.k, replace=False)
        )
        mocker.patch.object(
            vkt.channels["cluster_result"], "recv", side_effect=mock_get_cluster
        )
        mocker.patch.object(
            vkt.channels["converged_flag"], "recv", side_effect=mock_converged_flag
        )
        vkt.fit()
        # The run should converge and persist a model file.
        assert vkt.is_converged
        assert os.path.exists(
            "/opt/checkpoints/unit_test/vertical_kmeans_[STAGE_ID].model")
        with open("/opt/checkpoints/unit_test/vertical_kmeans_[STAGE_ID].model", "rb") as f:
            model = json.load(f)
        assert model["k"] == vkt.k
        assert model["iter"] <= vkt.max_iter
        assert model["is_converged"]
        assert model["tol"] == vkt.tol
        assert len(model["cluster_centers"]) == vkt.k

    @pytest.mark.parametrize("computing_engine", ["local", "spark"])
    def test_trainer(self, get_trainer_conf, computing_engine, mocker):
        """End-to-end fit of a non-label trainer; checks model and cluster CSV output."""
        conf = get_trainer_conf
        conf["computing_engine"] = computing_engine
        # Mock the functions needed during init so no real channels are built.
        mocker.patch.object(
            DualChannel, "__init__", return_value=None
        )
        mocker.patch.object(
            DualChannel, "send", return_value=0
        )

        def mock_func(*args, **kwargs):
            if mock_dual_recv.call_count == 1:
                return mock_config
        mock_dual_recv = mocker.patch.object(
            DualChannel, "recv", side_effect=mock_func
        )
        mocker.patch.object(
            TableAggregatorOTPTrainer, "__init__", return_value=None
        )
        mocker.patch.object(
            TableAggregatorOTPTrainer, "send"
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_label_trainer", return_value=["trainer-1"]
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_trainer", return_value=["trainer-2"]
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_assist_trainer", return_value="scheduler"
        )
        # Instantiate the trainer under test.
        vkt = VerticalKmeansTrainer(get_trainer_conf)
        # Initial cluster centers.
        init_centers = np.random.choice(1000, vkt.k, replace=False)
        mocker.patch.object(
            vkt.channels.get("init_center", DualChannel), "recv", return_value=init_centers
        )

        # Mock the scheduler-side responses using the trainer's own local state.
        def mock_get_cluster():
            return VerticalKmeansAssistTrainer.get_cluster(vkt.dist_table)

        def mock_converged_flag():
            return bool(vkt.local_tol < vkt.tol)
        mocker.patch.object(
            vkt.channels["cluster_result"], "recv", side_effect=mock_get_cluster
        )
        mocker.patch.object(
            vkt.channels["converged_flag"], "recv", side_effect=mock_converged_flag
        )
        vkt.fit()
        # The run should converge and persist a model file.
        assert vkt.is_converged
        assert os.path.exists(
            "/opt/checkpoints/unit_test/vertical_kmeans_[STAGE_ID].model")
        with open("/opt/checkpoints/unit_test/vertical_kmeans_[STAGE_ID].model", "rb") as f:
            model = json.load(f)
        assert model["k"] == vkt.k
        assert model["iter"] <= vkt.max_iter
        assert model["is_converged"]
        assert model["tol"] == vkt.tol
        assert len(model["cluster_centers"]) == vkt.k
        # Check the output file.
        assert os.path.exists(
            "/opt/checkpoints/unit_test/cluster_result_[STAGE_ID].csv")
        if computing_engine == "local":
            df = pd.read_csv(
                "/opt/checkpoints/unit_test/cluster_result_[STAGE_ID].csv")
            assert (df["id"] == vkt.train_ids).all()
            # assert (df["cluster_label"] == vkt.cluster_result).all()

    @pytest.mark.parametrize("computing_engine", ["local", "spark"])
    def test_scheduler(self, get_scheduler_conf, computing_engine, mocker):
        """Fit the assist trainer (scheduler) with mocked trainer traffic."""
        conf = get_scheduler_conf
        conf["computing_engine"] = computing_engine
        # Mock the functions needed during init so no real channels are built.
        mocker.patch("service.fed_control._send_progress")
        mocker.patch.object(
            DualChannel, "__init__", return_value=None
        )
        mocker.patch.object(
            DualChannel, "send", return_value=0
        )

        def mock_func(*args, **kwargs):
            if mock_dual_recv.call_count == 1:
                return mock_config
        mock_dual_recv = mocker.patch.object(
            DualChannel, "recv", side_effect=mock_func
        )
        mocker.patch.object(
            TableAggregatorOTPAssistTrainer, "__init__", return_value=None
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_label_trainer", return_value=["trainer-1"]
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_trainer", return_value=["trainer-2"]
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_assist_trainer", return_value="scheduler"
        )
        # Instantiate the trainer under test.
        vks = VerticalKmeansAssistTrainer(conf)

        # NOTE(review): this def rebinds the name `mock_dual_recv` from the mock
        # object above to a plain function; harmless here because the earlier
        # side_effect is replaced below, but the shadowing is confusing.
        def mock_dual_recv():
            # First two recvs deliver (sample count, feature count); later recvs
            # deliver the tolerance value.
            if mock_recv.call_count > 2:
                return 1.0
            else:
                return 1000, 2
        # Mock the results sent by the trainers.
        # tolerance
        mock_recv = mocker.patch.object(
            DualChannel, "recv", side_effect=mock_dual_recv
        )
        # distance table
        dist_table = torch.tensor(np.random.random((1000, vks.k)))
        # center dist
        center_dist = torch.tensor(np.random.random(vks.k * (vks.k - 1)))

        def mock_aggregate():
            # After the first call, odd-numbered calls deliver center distances;
            # all other calls deliver the sample-to-center distance table.
            if mock_agg.call_count > 1 and mock_agg.call_count % 2 == 1:
                return center_dist
            else:
                return dist_table
        mock_agg = mocker.patch.object(
            vks.dist_table_agg_executor, "aggregate", side_effect=mock_aggregate
        )
        vks.fit()

    def test_calc_dbi(self, get_scheduler_conf, get_label_trainer_conf, mocker):
        """Compare the federated DBI computation with sklearn's davies_bouldin_score."""
        # Mock the functions needed during init so no real channels are built.
        mocker.patch.object(
            DualChannel, "__init__", return_value=None
        )
        mocker.patch.object(
            DualChannel, "send", return_value=0
        )

        def mock_func(*args, **kwargs):
            return mock_config
        mocker.patch.object(
            DualChannel, "recv", side_effect=mock_func
        )
        mocker.patch.object(
            TableAggregatorOTPTrainer, "__init__", return_value=None
        )
        mocker.patch.object(
            TableAggregatorOTPAssistTrainer, "__init__", return_value=None
        )
        mocker.patch.object(
            TableAggregatorOTPTrainer, "send"
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_label_trainer", return_value=["trainer-1"]
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_trainer", return_value=["trainer-2"]
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_assist_trainer", return_value="scheduler"
        )
        vkt = VerticalKmeansTrainer(get_label_trainer_conf)
        vks = VerticalKmeansAssistTrainer(get_scheduler_conf)
        init_centers = np.random.choice(1000, vkt.k, replace=False)
        mocker.patch.object(
            vkt.channels.get("init_center", DualChannel), "recv", return_value=init_centers
        )
        # Check the metric against sklearn.
        center_ids = vkt.init_centers()
        cluster_centers = vkt.train_features.iloc[center_ids]
        dist_table = vkt.distance_table(cluster_centers)
        cluster_result = vks.get_cluster(dist_table)
        centers = vkt.calc_centers(cluster_centers, cluster_result)
        center_dist = vkt.distance_between_centers(centers)
        mocker.patch.object(
            vks.dist_table_agg_executor, "aggregate", return_value=center_dist
        )
        vks.cluster_count_list = vks.calc_cluster_count(cluster_result)
        dist_table = vkt.distance_table(centers)
        vks.calc_dbi(dist_table, cluster_result, 0)
        dbi_score = davies_bouldin_score(
            vkt.train_features.to_numpy(), cluster_result)
        np.testing.assert_almost_equal(vks.DBI, dbi_score, 3)
        # Verify the DBI computation when one cluster ends up empty:
        # merge cluster 1 into cluster 0.
        cluster_result_missing = []
        for _ in cluster_result:
            if _ != 1:
                cluster_result_missing.append(_)
            else:
                cluster_result_missing.append(0)
        # Recompute the cluster center coordinates.
        centers = vkt.calc_centers(cluster_centers, cluster_result_missing)
        center_dist = vkt.distance_between_centers(centers)
        mocker.patch.object(
            vks.dist_table_agg_executor, "aggregate", return_value=center_dist
        )
        vks.cluster_count_list = vks.calc_cluster_count(cluster_result_missing)
        dist_table = vkt.distance_table(centers)
        vks.calc_dbi(dist_table, cluster_result_missing, 1)
        dbi_score = davies_bouldin_score(
            vkt.train_features.to_numpy(), cluster_result_missing)
        np.testing.assert_almost_equal(vks.DBI, dbi_score, 3)

    def test_table_agg_base(self):
        """Smoke-test the abstract aggregator base classes."""
        table_trainer = TableAggregatorAbstractTrainer()
        table_trainer.send(pd.DataFrame({"x": [1, 2, 3]}))
        table_scheduler = TableAggregatorAbstractAssistTrainer()
        table_scheduler.aggregate()

    def test_table_agg_plain(self, mocker):
        """Plain (unencrypted) aggregation sums the collected tables element-wise."""
        pd_table = pd.DataFrame({"x": [1, 2, 3]})
        mocker.patch.object(
            BroadcastChannel, "send", return_value=0
        )
        mocker.patch.object(
            BroadcastChannel, "collect", return_value=[pd_table, pd_table]
        )
        table_trainer = TableAggregatorPlainTrainer(sec_conf={"plain": {}})
        table_trainer.send(pd_table)
        table_scheduler = TableAggregatorPlainAssistTrainer(sec_conf={
            "plain": {}})
        aggregated_table = table_scheduler.aggregate()
        # Two copies of [1, 2, 3] summed -> last element is 6.
        assert aggregated_table["x"].iloc[2] == 6
| 23,214 | 35.330203 | 123 | py |
XFL | XFL-master/test/algorithm/framework/vertical/test_linear_regression.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import json
import os
import shutil
import tenseal as ts
import numpy as np
import pandas as pd
import pytest
from pytest_mock import mocker
from common.crypto.paillier.paillier import Paillier
from service.fed_config import FedConfig
import service.fed_config
from algorithm.framework.vertical.linear_regression.assist_trainer import \
VerticalLinearRegressionAssistTrainer
from algorithm.framework.vertical.linear_regression.trainer import VerticalLinearRegressionTrainer
from algorithm.framework.vertical.linear_regression.label_trainer import VerticalLinearRegressionLabelTrainer
from common.communication.gRPC.python.channel import (BroadcastChannel, DualChannel)
def prepare_data():
    """Generate a synthetic linear-regression dataset and write the guest
    (label-holding) and two host (feature-only) train/test CSV partitions
    used by the vertical linear-regression tests."""
    rand = np.random.random
    df = pd.DataFrame({
        'x0': rand(1000),
        'x1': 2 * rand(1000) + 2.0,
        'x2': 2 * rand(1000) + 1.0,
        'x3': 3 * rand(1000) - 1.0,
        'x4': rand(1000)
    })
    # Linear target over four of the five features (x4 is pure noise).
    df['y'] = df['x0'] + df['x1'] + df['x3'] + 0.5 * df['x2']
    partitions = {
        "guest": ['y', 'x0'],
        "host1": ['x1', 'x2'],
        "host2": ['x3', 'x4'],
    }
    # First 800 rows are the train split, last 200 the test split.
    for party, cols in partitions.items():
        df[cols].head(800).to_csv(
            f"/opt/dataset/unit_test/train_{party}.csv", index=True
        )
        df[cols].tail(200).to_csv(
            f"/opt/dataset/unit_test/test_{party}.csv", index=True
        )
@pytest.fixture(scope="module", autouse=True)
def env():
    """Module-scoped fixture: create the dataset/checkpoint scratch
    directories, generate the CSV fixtures, and remove everything once the
    module's tests have finished."""
    scratch_dirs = ("/opt/dataset/unit_test", "/opt/checkpoints/unit_test")
    for path in scratch_dirs:
        if not os.path.exists(path):
            os.makedirs(path)
    prepare_data()
    yield
    for path in scratch_dirs:
        if os.path.exists(path):
            shutil.rmtree(path)
@pytest.fixture()
def get_label_trainer_conf():
    """Load the stock label-trainer config and redirect its dataset and
    output paths to the unit-test scratch directories."""
    conf_path = "python/algorithm/config/vertical_linear_regression/label_trainer.json"
    with open(conf_path) as f:
        conf = json.load(f)
    conf["input"]["trainset"][0].update(
        {"path": "/opt/dataset/unit_test", "name": "train_guest.csv"})
    conf["input"]["valset"][0].update(
        {"path": "/opt/dataset/unit_test", "name": "test_guest.csv"})
    conf["output"]["path"] = "/opt/checkpoints/unit_test"
    yield conf
@pytest.fixture()
def get_trainer1_conf():
    """Load the stock trainer config and redirect its dataset and output
    paths to the unit-test scratch directories (host-1 partition)."""
    conf_path = "python/algorithm/config/vertical_linear_regression/trainer.json"
    with open(conf_path) as f:
        conf = json.load(f)
    conf["input"]["trainset"][0].update(
        {"path": "/opt/dataset/unit_test", "name": "train_host1.csv"})
    conf["input"]["valset"][0].update(
        {"path": "/opt/dataset/unit_test", "name": "test_host1.csv"})
    conf["output"]["path"] = "/opt/checkpoints/unit_test"
    yield conf
class TestVerticalLinearRegressionTrainer:
    """End-to-end tests that drive the label trainer, both feature trainers
    and the assist trainer of vertical linear regression in one process,
    with every cross-party channel replaced by mocks."""
    @pytest.mark.parametrize("encryption", [{"ckks": {
        "poly_modulus_degree": 8192, "coeff_mod_bit_sizes": [60, 40, 40, 60], "global_scale_bit_size": 40}},
        {"plain": {}}, {"paillier": {"key_bit_size": 2048, "precision": 7, "djn_on": True, "parallelize_on": True}}])
    def test_all_trainers(self, get_label_trainer_conf, encryption, mocker):
        """Run one training round through all three roles for each supported
        encryption scheme (ckks / plain / paillier).

        The mocked ``recv`` helpers below are keyed on ``call_count``: the
        n-th call returns the n-th message the real protocol would deliver,
        so the branch order mirrors the production message sequence.
        """
        config_sync = {
            "train_info": {
                "interaction_params": {
                    "save_frequency": -1,
                    "write_training_prediction": True,
                    "write_validation_prediction": True,
                    "echo_training_metrics": True
                },
                "train_params": {
                    "global_epoch": 1,
                    "batch_size": 1000,
                    "encryption": encryption,
                    "optimizer": {
                        "lr": 0.01,
                        "p": 2,
                        "alpha": 1e-4
                    },
                    "metric": {
                        "mse": {},
                        "mape": {},
                        "mae": {},
                        "rmse": {}
                    },
                    "early_stopping": {
                        "key": "loss",
                        "patience": -1,
                        "delta": 0
                    },
                    "random_seed": 50
                }
            }
        }
        conf = get_label_trainer_conf
        # Build the trainer-side config by hand (host-1 partition).
        with open("python/algorithm/config/vertical_linear_regression/trainer.json") as f:
            conf_t = json.load(f)
        conf_t["input"]["trainset"][0]["path"] = "/opt/dataset/unit_test"
        conf_t["input"]["trainset"][0]["name"] = "train_host1.csv"
        conf_t["input"]["valset"][0]["path"] = "/opt/dataset/unit_test"
        conf_t["input"]["valset"][0]["name"] = "test_host1.csv"
        conf_t["output"]["path"] = "/opt/checkpoints/unit_test"
        conf["train_info"]["train_params"]["global_epoch"] = 1
        conf["train_info"]["train_params"]["batch_size"] = 1000
        conf["train_info"]["train_params"]["encryption"] = encryption
        # test trainset not configured error
        conf2 = copy.deepcopy(conf)
        conf2["input"]["trainset"] = []
        mocker.patch.object(
            BroadcastChannel, "__init__", return_value=None
        )
        mocker.patch.object(
            BroadcastChannel, "broadcast", return_value=0
        )
        with pytest.raises(NotImplementedError) as e:
            vlr_ = VerticalLinearRegressionLabelTrainer(conf2)
            exec_msg = e.value.args[0]
            assert exec_msg == "Trainset was not configured."
        # test trainset type not configured error
        conf1 = copy.deepcopy(conf)
        conf1["input"]["trainset"][0]["type"] = "json"
        with pytest.raises(NotImplementedError) as e:
            vlr_ = VerticalLinearRegressionLabelTrainer(conf1)
            exec_msg = e.value.args[0]
            assert exec_msg == "Dataset type {} is not supported.".format(vlr_.input["trainset"][0]["type"])
        # mock label_trainer
        encryption_config = encryption
        encryption_method = list(encryption.keys())[0]
        if encryption_method == "ckks":
            private_context = ts.context(
                ts.SCHEME_TYPE.CKKS,
                poly_modulus_degree=encryption_config[encryption_method]["poly_modulus_degree"],
                coeff_mod_bit_sizes=encryption_config[encryption_method]["coeff_mod_bit_sizes"]
            )
            private_context.generate_galois_keys()
            private_context.generate_relin_keys()
            private_context.global_scale = 1 << encryption_config[encryption_method][
                "global_scale_bit_size"]
            serialized_public_context = private_context.serialize(
                save_public_key=True,
                save_secret_key=False,
                save_galois_keys=True,
                save_relin_keys=True
            )
            public_context = ts.context_from(serialized_public_context)
        elif encryption_method == "paillier":
            num_cores = -1 if encryption_config[encryption_method]["parallelize_on"] else 1
            private_context = Paillier.context(encryption_config[encryption_method]["key_bit_size"],
                                               djn_on=encryption_config[encryption_method]["djn_on"])
            paillier_key = private_context.to_public().serialize()
            public_context = Paillier.context_from(paillier_key)
        mocker.patch("service.fed_control._send_progress")
        mocker.patch.object(
            DualChannel, "__init__", return_value=None
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_label_trainer", return_value=['node-1']
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_trainer", return_value=['node-2', 'node-3']
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_assist_trainer", return_value="assist_trainer"
        )
        mocker.patch.object(
            DualChannel, "send", return_value=0
        )
        # First recv delivers the synced config; later recvs the public key.
        def mock_broadcast_recv(*args, **kwargs):
            if mock_broadcast_recv_func.call_count == 1:
                return copy.deepcopy(config_sync)
            else:
                if vlr.encryption_method == "ckks":
                    return serialized_public_context
                elif vlr.encryption_method == "paillier":
                    return private_context.to_public().serialize()
        def mock_broadcast_collect(*args, **kwargs):
            return [2, 2]
        mock_broadcast_recv_func = mocker.patch.object(
            BroadcastChannel, "recv", side_effect=mock_broadcast_recv
        )
        vlr = VerticalLinearRegressionLabelTrainer(conf)
        vlr_t = VerticalLinearRegressionTrainer(conf_t)
        assert len(vlr.train_dataloader) == int(len(vlr.train) / vlr.batch_size) + 1
        # Pull one batch from each dataloader to precompute reference values.
        for batch_idx, (_x_batch, _) in enumerate(vlr_t.train_dataloader):
            x_batch = _x_batch
            break
        for batch_idx, (_x_batch, _y_batch, _) in enumerate(vlr.train_dataloader):
            x_batch_label = _x_batch
            y_batch = _y_batch
            break
        for batch_idx, (_x_batch, _) in enumerate(vlr_t.val_dataloader):
            x_batch_val = _x_batch
            break
        # Reference partial predictions / losses used by the channel mocks.
        pred_trainer = vlr_t.model(x_batch)
        pred_trainer_val = vlr_t.model(x_batch_val)
        pred_label_trainer = vlr.model(x_batch_label)
        loss_trainer = (pred_trainer ** 2).sum() / 2
        pred_residual = pred_label_trainer - y_batch
        loss_label_trainer = (pred_residual ** 2).sum() / 2
        loss = loss_trainer + loss_label_trainer
        d = pred_trainer + pred_residual
        if encryption_method == "paillier":
            en_pred_trainer_p = Paillier.serialize(Paillier.encrypt(public_context,
                                                                    pred_trainer.numpy().astype(np.float32).flatten(),
                                                                    precision=encryption_config[encryption_method][
                                                                        "precision"],
                                                                    obfuscation=True, num_cores=num_cores))
            en_loss_trainer_p = Paillier.serialize(Paillier.encrypt(public_context,
                                                                    float(loss_trainer),
                                                                    precision=encryption_config[encryption_method][
                                                                        "precision"],
                                                                    obfuscation=True, num_cores=num_cores))
        # Messages the label trainer receives from node-2, in protocol order.
        def mock_dual_label_t_recv(*args, **kwargs):
            if encryption_method == "ckks":
                if mock_label_t_recv.call_count == 1:
                    return ts.ckks_vector(public_context, pred_trainer.numpy().astype(np.float32).flatten()).serialize()
                elif mock_label_t_recv.call_count == 2:
                    return ts.ckks_vector(public_context, loss_trainer.numpy().astype(np.float32).flatten()).serialize()
                elif mock_label_t_recv.call_count == 3:
                    return pred_trainer.numpy().astype(np.float32).flatten()
                elif mock_label_t_recv.call_count == 4:
                    return pred_trainer_val.numpy().astype(np.float32).flatten()
                elif mock_label_t_recv.call_count == 5:
                    tmp = ("node-2", vlr_t.model.state_dict()["linear.weight"][0])
                    return tmp
            elif encryption_method == "paillier":
                if mock_label_t_recv.call_count == 1:
                    return en_pred_trainer_p
                elif mock_label_t_recv.call_count == 2:
                    return en_loss_trainer_p
                elif mock_label_t_recv.call_count == 3:
                    tmp = Paillier.ciphertext_from(public_context, en_pred_trainer_p)
                    return Paillier.serialize(np.sum(tmp * pred_trainer.numpy().astype(np.float32).flatten()))
                elif mock_label_t_recv.call_count == 4:
                    return pred_trainer.numpy().astype(np.float32).flatten()
                elif mock_label_t_recv.call_count == 5:
                    return pred_trainer_val.numpy().astype(np.float32).flatten()
                elif mock_label_t_recv.call_count == 6:
                    tmp = ("node-2", vlr_t.model.state_dict()["linear.weight"][0])
                    return tmp
            elif encryption_method == "plain":
                if mock_label_t_recv.call_count == 1:
                    return pred_trainer.numpy().astype(np.float32).flatten()
                elif mock_label_t_recv.call_count == 2:
                    return loss_trainer.numpy().astype(np.float32).flatten()
                elif mock_label_t_recv.call_count == 3:
                    return pred_trainer.numpy().astype(np.float32).flatten()
                elif mock_label_t_recv.call_count == 4:
                    return pred_trainer_val.numpy().astype(np.float32).flatten()
                elif mock_label_t_recv.call_count == 5:
                    tmp = ("node-2", vlr_t.model.state_dict()["linear.weight"][0])
                    return tmp
        # Same message sequence, but from node-3.
        def mock_dual_label_t_recv_1(*args, **kwargs):
            if encryption_method == "ckks":
                if mock_label_t_recv_1.call_count == 1:
                    return ts.ckks_vector(public_context, pred_trainer.numpy().astype(np.float32).flatten()).serialize()
                elif mock_label_t_recv_1.call_count == 2:
                    return ts.ckks_vector(public_context, loss_trainer.numpy().astype(np.float32).flatten()).serialize()
                elif mock_label_t_recv_1.call_count == 3:
                    return pred_trainer.numpy().astype(np.float32).flatten()
                elif mock_label_t_recv_1.call_count == 4:
                    return pred_trainer_val.numpy().astype(np.float32).flatten()
                elif mock_label_t_recv_1.call_count == 5:
                    tmp = ("node-3", vlr_t.model.state_dict()["linear.weight"][0])
                    return tmp
            elif encryption_method == "paillier":
                if mock_label_t_recv_1.call_count == 1:
                    return en_pred_trainer_p
                elif mock_label_t_recv_1.call_count == 2:
                    return en_loss_trainer_p
                elif mock_label_t_recv_1.call_count == 3:
                    tmp = Paillier.ciphertext_from(public_context, en_pred_trainer_p)
                    return Paillier.serialize(np.sum(tmp * pred_trainer.numpy().astype(np.float32).flatten()))
                elif mock_label_t_recv_1.call_count == 4:
                    return pred_trainer.numpy().astype(np.float32).flatten()
                elif mock_label_t_recv_1.call_count == 5:
                    return pred_trainer_val.numpy().astype(np.float32).flatten()
                elif mock_label_t_recv_1.call_count == 6:
                    tmp = ("node-3", vlr_t.model.state_dict()["linear.weight"][0])
                    return tmp
            elif encryption_method == "plain":
                if mock_label_t_recv_1.call_count == 1:
                    return pred_trainer.numpy().astype(np.float32).flatten()
                elif mock_label_t_recv_1.call_count == 2:
                    return loss_trainer.numpy().astype(np.float32).flatten()
                elif mock_label_t_recv_1.call_count == 3:
                    return pred_trainer.numpy().astype(np.float32).flatten()
                elif mock_label_t_recv_1.call_count == 4:
                    return pred_trainer_val.numpy().astype(np.float32).flatten()
                elif mock_label_t_recv_1.call_count == 5:
                    tmp = ("node-3", vlr_t.model.state_dict()["linear.weight"][0])
                    return tmp
        mock_label_t_recv = mocker.patch.object(
            vlr.dual_channels["intermediate_label_trainer"]["node-2"], "recv", side_effect=mock_dual_label_t_recv
        )
        mock_label_t_recv_1 = mocker.patch.object(
            vlr.dual_channels["intermediate_label_trainer"]["node-3"], "recv", side_effect=mock_dual_label_t_recv_1
        )
        mocker.patch.object(
            BroadcastChannel, "collect", side_effect=mock_broadcast_collect
        )
        # Assist trainer replies: first the total loss, then noised gradients.
        def mock_gradients_loss(*args, **kwargs):
            if mock_gradients_loss_label.call_count == 1:
                return loss
            elif mock_gradients_loss_label.call_count == 2:
                tmp_w = vlr.model.linear.weight
                tmp_b = vlr.model.linear.bias
                return {"noised_gradient_label_trainer_w": tmp_w, "noised_gradient_label_trainer_b": tmp_b}
        mock_gradients_loss_label = mocker.patch.object(
            vlr.dual_channels["gradients_loss"], "recv", side_effect=mock_gradients_loss
        )
        # fit label_trainer
        vlr.fit()
        # mock for trainer
        mocker.patch.object(
            BroadcastChannel, "send", return_value=0
        )
        def mock_trainer_collect(*args, **kwargs):
            return [en_pred_trainer_p]
        mocker.patch.object(
            BroadcastChannel, "collect", side_effect=mock_trainer_collect
        )
        # Trainer receives the (encrypted) residual d, then the stop signal.
        def mock_dual_recv(*args, **kwargs):
            if mock_trainer_dual_recv.call_count == 1:
                if encryption_method == "ckks":
                    return ts.ckks_vector(public_context, d.numpy().astype(np.float32).flatten()).serialize()
                elif encryption_method == "paillier":
                    return Paillier.serialize(Paillier.encrypt(public_context, d.numpy().astype(np.float32).flatten(),
                                                               precision=encryption_config[encryption_method][
                                                                   "precision"],
                                                               obfuscation=True, num_cores=num_cores))
                elif encryption_method == "plain":
                    return d
            elif mock_trainer_dual_recv.call_count == 2:
                return [False, True, vlr.early_stopping_config["patience"]]
        mock_trainer_dual_recv = mocker.patch.object(
            vlr_t.dual_channels["intermediate_label_trainer"], "recv", side_effect=mock_dual_recv
        )
        def mock_gradients_trainer(*args, **kwargs):
            return vlr_t.model.linear.weight
        mocker.patch.object(
            vlr_t.dual_channels["gradients_loss"], "recv", side_effect=mock_gradients_trainer
        )
        def mock_broadcast_trainer_recv(*args, **kwargs):
            if encryption_method == "ckks":
                return serialized_public_context
            elif encryption_method == "paillier":
                return private_context.to_public().serialize()
        mocker.patch.object(
            vlr_t.broadcast_channel, "recv", side_effect=mock_broadcast_trainer_recv
        )
        # fit vlr_t
        vlr_t.fit()
        # mock for assist_trainer
        def mock_dual_init_recv(*args, **kwargs):
            if mock_dual_init_recv_.call_count == 1:
                return 1
            elif mock_dual_init_recv_.call_count == 2:
                return 1
            elif mock_dual_init_recv_.call_count == 3:
                return 1000
            elif mock_dual_init_recv_.call_count == 4:
                return encryption_config
            elif mock_dual_init_recv_.call_count == 5:
                return encryption_method
        mock_dual_init_recv_ = mocker.patch.object(
            DualChannel, "recv", side_effect=mock_dual_init_recv
        )
        vlr_a = VerticalLinearRegressionAssistTrainer()
        if encryption_method == "paillier":
            num_cores = -1 if encryption_config[encryption_method]["parallelize_on"] else 1
            public_context = Paillier.context_from(vlr_a.public_context_ser)
        elif encryption_method == "ckks":
            public_context = ts.context_from(vlr_a.public_context_ser)
        # Loss and encrypted parameters the label trainer sends to the
        # assist trainer.  NOTE: intentionally shadows the earlier helper
        # of the same name (only the patched side_effect below is used).
        def mock_dual_label_t_recv(*args, **kwargs):
            if mock_dual_label_recv_.call_count == 1:
                if encryption_method == "ckks":
                    return ts.ckks_vector(public_context, loss.numpy().astype(np.float32).flatten()
                                          ).serialize()
                elif encryption_method == "paillier":
                    return Paillier.serialize(Paillier.encrypt(public_context, float(loss),
                                                               precision=encryption_config[encryption_method][
                                                                   "precision"], obfuscation=True, num_cores=num_cores))
                elif encryption_method == "plain":
                    return loss
            elif mock_dual_label_recv_.call_count == 2:
                if encryption_method == "ckks":
                    return ts.ckks_vector(public_context, vlr.model.linear.weight.numpy().astype(np.float32).flatten()
                                          ).serialize()
                elif encryption_method == "paillier":
                    return Paillier.serialize(Paillier.encrypt(public_context, vlr.model.linear.weight.numpy().astype(
                        np.float32).flatten(), precision=encryption_config[encryption_method]["precision"],
                        obfuscation=True, num_cores=num_cores))
            elif mock_dual_label_recv_.call_count == 3:
                if encryption_method == "ckks":
                    return ts.ckks_vector(public_context, vlr.model.linear.bias.numpy().astype(np.float32).flatten()
                                          ).serialize()
                elif encryption_method == "paillier":
                    return Paillier.serialize(Paillier.encrypt(public_context, vlr.model.linear.bias.numpy().astype(
                        np.float32).flatten(), precision=encryption_config[encryption_method]["precision"],
                        obfuscation=True, num_cores=num_cores))
        mock_dual_label_recv_ = mocker.patch.object(
            vlr_a.dual_channels["gradients_loss"]['node-1'], "recv", side_effect=mock_dual_label_t_recv
        )
        def mock_dual_trainer_t_recv(*args, **kwargs):
            if encryption_method == "ckks":
                return ts.ckks_vector(public_context, vlr_t.model.linear.weight.numpy().astype(np.float32).flatten()
                                      ).serialize()
            elif encryption_method == "paillier":
                return Paillier.serialize(Paillier.encrypt(public_context, vlr_t.model.linear.weight.numpy().astype(
                    np.float32).flatten(), precision=encryption_config[encryption_method]["precision"],
                    obfuscation=True, num_cores=num_cores))
        mocker.patch.object(
            vlr_a.dual_channels["gradients_loss"]['node-2'], "recv", side_effect=mock_dual_trainer_t_recv
        )
        mocker.patch.object(
            vlr_a.dual_channels["gradients_loss"]['node-3'], "recv", side_effect=mock_dual_trainer_t_recv
        )
        # fit assist_trainer
        vlr_a.fit()
        # All roles ran: check persisted model, metrics and predictions.
        assert os.path.exists("/opt/checkpoints/unit_test/vertical_linear_regression_[STAGE_ID].pt")
        assert os.path.exists("/opt/checkpoints/unit_test/linear_reg_metric_train_[STAGE_ID].csv")
        assert os.path.exists("/opt/checkpoints/unit_test/linear_reg_metric_val_[STAGE_ID].csv")
        assert os.path.exists("/opt/checkpoints/unit_test/linear_reg_prediction_train_[STAGE_ID].csv")
        assert os.path.exists("/opt/checkpoints/unit_test/linear_reg_prediction_val_[STAGE_ID].csv")
        assert os.path.exists("/opt/checkpoints/unit_test/linear_reg_feature_importance_[STAGE_ID].csv")
        feature_importance = pd.read_csv("/opt/checkpoints/unit_test/linear_reg_feature_importance_[STAGE_ID].csv")
        assert len(feature_importance) == 5
        train_metric = pd.read_csv("/opt/checkpoints/unit_test/linear_reg_metric_train_[STAGE_ID].csv")
        assert len(train_metric.columns) == 6
        val_metric = pd.read_csv("/opt/checkpoints/unit_test/linear_reg_metric_val_[STAGE_ID].csv")
        assert len(val_metric.columns) == 6
| 25,474 | 47.990385 | 120 | py |
XFL | XFL-master/test/algorithm/framework/vertical/test_logistic_regression.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import json
import os
import shutil
import numpy as np
import pandas as pd
import pytest
import tenseal as ts
import torch
from google.protobuf import json_format
import service.fed_config
import service.fed_node
import service.fed_control
from algorithm.framework.vertical.logistic_regression.label_trainer import \
VerticalLogisticRegressionLabelTrainer
from algorithm.framework.vertical.logistic_regression.trainer import \
VerticalLogisticRegressionTrainer
from common.communication.gRPC.python.channel import BroadcastChannel
from common.crypto.paillier.paillier import Paillier
from common.communication.gRPC.python.commu import Commu
from common.model.python.linear_model_pb2 import LinearModel
def prepare_data():
    """Build a synthetic binary-classification dataset and write the guest
    (labelled) and host (feature-only) train/test CSV splits used by the
    vertical logistic-regression tests."""
    df = pd.DataFrame({
        'x0': np.random.random(1000),
        'x1': [0] * 1000,
        'x2': 2 * np.random.random(1000) + 1.0,
        'x3': 3 * np.random.random(1000) - 1.0,
        'x4': np.random.random(1000)
    })
    # Label is a linear threshold over x0, x2 and x3 (x1, x4 carry no signal).
    df['y'] = np.where(df['x0'] + df['x2'] + df['x3'] > 2.5, 1, 0)
    partitions = {
        "guest": ['y', 'x0', 'x1', 'x2'],
        "host": ['x3', 'x4'],
    }
    # First 80 rows are the train split, last 20 the test split.
    for party, cols in partitions.items():
        df[cols].head(80).to_csv(
            f"/opt/dataset/unit_test/train_{party}.csv", index=True
        )
        df[cols].tail(20).to_csv(
            f"/opt/dataset/unit_test/test_{party}.csv", index=True
        )
# Training configuration the label trainer broadcasts to trainers at the
# start of a run; the trainer-side tests replay it through the mocked
# BroadcastChannel.recv.
config_sync = {
    "train_info": {
        "interaction_params": {
            "save_frequency": -1,
            "write_training_prediction": True,
            "write_validation_prediction": True,
            "echo_training_metrics": True
        },
        "train_params": {
            "global_epoch": 10,
            "batch_size": 2048,
            "encryption": {
                "ckks": {
                    "poly_modulus_degree": 8192,
                    "coeff_mod_bit_sizes": [
                        60,
                        40,
                        40,
                        60
                    ],
                    "global_scale_bit_size": 40
                }
            },
            "optimizer": {
                "lr": 0.01,
                "p": 2,
                "alpha": 1e-4
            },
            "early_stopping": {
                "key": "acc",
                "patience": 10,
                "delta": 0
            },
            "random_seed": None
        }
    }
}
@pytest.fixture()
def get_label_trainer_conf():
    """Load the stock logistic-regression label-trainer config, redirect its
    dataset/output paths to the unit-test scratch dirs and disable periodic
    checkpointing."""
    conf_path = "python/algorithm/config/vertical_logistic_regression/label_trainer.json"
    with open(conf_path) as f:
        conf = json.load(f)
    conf["input"]["trainset"][0].update(
        {"path": "/opt/dataset/unit_test", "name": "train_guest.csv"})
    conf["input"]["valset"][0].update(
        {"path": "/opt/dataset/unit_test", "name": "test_guest.csv"})
    conf["output"]["path"] = "/opt/checkpoints/unit_test"
    conf["train_info"]["interaction_params"]["save_frequency"] = -1
    yield conf
@pytest.fixture()
def get_trainer_conf():
    """Load the stock logistic-regression trainer config and redirect its
    dataset/output paths to the unit-test scratch directories."""
    conf_path = "python/algorithm/config/vertical_logistic_regression/trainer.json"
    with open(conf_path) as f:
        conf = json.load(f)
    conf["input"]["trainset"][0].update(
        {"path": "/opt/dataset/unit_test", "name": "train_host.csv"})
    conf["input"]["valset"][0].update(
        {"path": "/opt/dataset/unit_test", "name": "test_host.csv"})
    conf["output"]["path"] = "/opt/checkpoints/unit_test"
    yield conf
@pytest.fixture(scope="module", autouse=True)
def env():
    """Module fixture: pin the federation identity used by the channel layer,
    create scratch directories, generate the CSV fixtures, and clean up after
    all tests in the module have run."""
    # Static federation topology the mocked channels expect.
    Commu.node_id = "node-1"
    Commu.trainer_ids = ['node-1', 'node-2']
    Commu.scheduler_id = 'assist_trainer'
    service.fed_node.FedNode.node_name = 'node-1'
    scratch_dirs = ("/opt/dataset/unit_test", "/opt/checkpoints/unit_test")
    for path in scratch_dirs:
        if not os.path.exists(path):
            os.makedirs(path)
    prepare_data()
    yield
    for path in scratch_dirs:
        if os.path.exists(path):
            shutil.rmtree(path)
class TestLogisticRegression:
    @pytest.mark.parametrize("encryption_method, p", [
        ("ckks", 1), ("paillier", 1), ("plain", 1), ("other", 1),
        ("ckks", 0), ("paillier", 0), ("plain", 0),
        ("ckks", 2), ("paillier", 2), ("plain", 2), ("ckks", 3)
    ])
    def test_label_trainer(self, get_label_trainer_conf, p, encryption_method, mocker):
        """Workflow test for the label trainer: run fit() for each encryption
        scheme and regularization order p; invalid combinations must raise.

        The mocked ``collect`` is keyed on ``call_count`` and replays the
        trainer-side messages in the order the real protocol delivers them.
        """
        # label trainer workflow test
        mocker.patch("service.fed_control._send_progress")
        mocker.patch.object(
            BroadcastChannel, "broadcast", return_value=0
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_label_trainer", return_value=["node-1"]
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_trainer", return_value=["node-2"]
        )
        lrt = VerticalLogisticRegressionLabelTrainer(get_label_trainer_conf)
        mocker.patch.object(
            BroadcastChannel, "scatter", return_value=0
        )
        # Override the encryption section for the parametrized method
        # (ckks keeps the config loaded from file).
        if encryption_method == "paillier":
            lrt.encryption_config = {
                "paillier": {
                    "key_bit_size": 2048,
                    "precision": 7,
                    "djn_on": True,
                    "parallelize_on": True
                }
            }
        elif encryption_method == "plain":
            lrt.encryption_config = {
                "plain": {}
            }
        elif encryption_method == "ckks":
            pass
        else:
            lrt.encryption_config = {
                encryption_method: {}
            }
        encryption_config = lrt.encryption_config
        if encryption_method == "ckks":
            private_context = ts.context(
                ts.SCHEME_TYPE.CKKS,
                poly_modulus_degree=encryption_config[encryption_method]["poly_modulus_degree"],
                coeff_mod_bit_sizes=encryption_config[encryption_method]["coeff_mod_bit_sizes"]
            )
            private_context.generate_galois_keys()
            private_context.generate_relin_keys()
            private_context.global_scale = 1 << encryption_config[
                encryption_method]["global_scale_bit_size"]
            serialized_public_context = private_context.serialize(
                save_public_key=True,
                save_secret_key=False,
                save_galois_keys=True,
                save_relin_keys=True
            )
            public_context = ts.context_from(serialized_public_context)
        elif encryption_method == "paillier":
            private_context = Paillier.context(
                encryption_config[encryption_method]["key_bit_size"],
                djn_on=encryption_config[encryption_method]["djn_on"])
            public_context = private_context.to_public().serialize()
            public_context = Paillier.context_from(public_context)
        # Replays trainer replies: first sample counts, then alternating
        # train predictions / encrypted residuals / val predictions.
        def mock_collect(*args, **kwargs):
            if mock_channel_collect.call_count <= 1:
                return [2]
            if encryption_method == "ckks":
                if mock_channel_collect.call_count > 10:
                    return []
                if mock_channel_collect.call_count % 3 == 2:
                    return [torch.tensor(np.zeros([80, 1]))]
                elif mock_channel_collect.call_count % 3 == 0:
                    pred_residual = torch.tensor(np.random.random(2))
                    enc_pred_residual = ts.ckks_vector(
                        private_context, pred_residual.numpy().flatten())
                    serialized_enc_pred_residual = enc_pred_residual.serialize()
                    pred_residual = ts.ckks_vector_from(
                        public_context, serialized_enc_pred_residual)
                    return [pred_residual.serialize()]
                else:
                    return [torch.tensor(np.zeros([20, 1]))]
            elif encryption_method == "paillier":
                return []
            elif encryption_method == "plain":
                if mock_channel_collect.call_count >= 10:
                    return []
                if mock_channel_collect.call_count % 2 == 0:
                    return [torch.tensor(np.zeros([80, 1]))]
                else:
                    return [torch.tensor(np.zeros([20, 1]))]
            else:
                pass
        mock_channel_collect = mocker.patch.object(
            BroadcastChannel, "collect", side_effect=mock_collect
        )
        lrt.optimizer_config['p'] = p
        # Unknown encryption / unsupported p must raise; otherwise fit runs.
        if encryption_method not in ("ckks", "paillier", "plain"):
            msg = f"Encryption method {encryption_method} not supported! Valid methods are 'paillier', 'ckks', 'plain'."
            with pytest.raises(ValueError) as e:
                lrt.fit()
            exec_msg = e.value.args[0]
            assert exec_msg == msg
        elif p not in (0, 1, 2):
            with pytest.raises(NotImplementedError) as e:
                lrt.fit()
            exec_msg = e.value.args[0]
            assert exec_msg == "Regular P={} not implement.".format(p)
        else:
            lrt.fit()
        self.check_model_output()
    @pytest.mark.parametrize("encryption_method, p", [
        ("ckks", 1), ("paillier", 1), ("plain", 1), ("other", 1),
        ("ckks", 0), ("paillier", 0), ("plain", 0),
        ("ckks", 2), ("paillier", 2), ("plain", 2), ("ckks", 3)
    ])
    def test_trainer(self, get_trainer_conf, encryption_method, p, mocker):
        """Workflow test for the (label-free) trainer: run fit() for each
        encryption scheme and regularization order p; invalid combinations
        must raise.

        The mocked ``recv`` helpers are keyed on ``call_count`` and replay
        the label-trainer messages in protocol order: config sync, sample
        count, public key, then per-batch (encrypted) residuals.
        """
        # trainer workflow test
        mocker.patch.object(
            BroadcastChannel, "broadcast", return_value=0
        )
        def mock_func(*args, **kwargs):
            if mock_broadcast_recv.call_count == 1:
                return copy.deepcopy(config_sync)
            elif mock_broadcast_recv.call_count == 2:
                return 50
            else:
                return 0
        mock_broadcast_recv = mocker.patch.object(
            BroadcastChannel, "recv", side_effect=mock_func
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_label_trainer", return_value=["node-1"]
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_trainer", return_value=["node-2"]
        )
        lrt = VerticalLogisticRegressionTrainer(get_trainer_conf)
        # Override the encryption section for the parametrized method
        # (ckks keeps the config received via config_sync).
        if encryption_method == "paillier":
            lrt.encryption_config = {
                "paillier": {
                    "key_bit_size": 2048,
                    "precision": 7,
                    "djn_on": True,
                    "parallelize_on": True
                }
            }
        elif encryption_method == "plain":
            lrt.encryption_config = {
                "plain": {}
            }
        elif encryption_method == "ckks":
            pass
        else:
            lrt.encryption_config = {
                encryption_method: {}
            }
        encryption_config = lrt.encryption_config
        if encryption_method == "ckks":
            private_context = ts.context(
                ts.SCHEME_TYPE.CKKS,
                poly_modulus_degree=encryption_config[encryption_method]["poly_modulus_degree"],
                coeff_mod_bit_sizes=encryption_config[encryption_method]["coeff_mod_bit_sizes"]
            )
            private_context.generate_galois_keys()
            private_context.generate_relin_keys()
            private_context.global_scale = 1 << encryption_config[
                encryption_method]["global_scale_bit_size"]
        elif encryption_method == "paillier":
            num_cores = - \
                1 if encryption_config[encryption_method]["parallelize_on"] else 1
            private_context = Paillier.context(
                encryption_config[encryption_method]["key_bit_size"],
                djn_on=encryption_config[encryption_method]["djn_on"])
        # Messages from the label trainer: public key first, then alternating
        # encrypted residuals / gradient replies / early-stop signals.
        def mock_predict_residual(*args, **kwargs):
            if encryption_method == "ckks":
                if mock_channel_recv.call_count <= 1:
                    serialized_public_context = private_context.serialize(
                        save_public_key=True,
                        save_secret_key=False,
                        save_galois_keys=True,
                        save_relin_keys=True
                    )
                    return serialized_public_context
                elif mock_channel_recv.call_count % 3 == 2:
                    pred_residual = torch.tensor(np.random.random(80))
                    enc_pred_residual = ts.ckks_vector(
                        private_context, pred_residual.numpy().flatten())
                    serialized_enc_pred_residual = enc_pred_residual.serialize()
                    return serialized_enc_pred_residual
                elif mock_channel_recv.call_count % 3 == 0:
                    return np.random.random(2)
                else:
                    return False, False, -1
            elif encryption_method == "paillier":
                if mock_channel_recv.call_count <= 1:
                    return private_context.to_public().serialize()
                elif mock_channel_recv.call_count % 3 == 2:
                    pred_residual = torch.tensor(np.random.random(80))
                    enc_pred_residual = Paillier.encrypt(
                        private_context,
                        pred_residual.numpy().astype(np.float32).flatten(),
                        precision=encryption_config["paillier"]["precision"],
                        obfuscation=True,
                        num_cores=num_cores
                    )
                    return Paillier.serialize(enc_pred_residual)
                elif mock_channel_recv.call_count % 3 == 0:
                    return np.random.random(2)
                else:
                    return False, False, -1
            elif encryption_method == "plain":
                if mock_channel_recv.call_count % 2 == 1:
                    return torch.tensor(np.random.random((80, 1)), dtype=torch.float)
                else:
                    return False, False, -1
        mock_channel_recv = mocker.patch.object(
            BroadcastChannel, "recv", side_effect=mock_predict_residual
        )
        mocker.patch.object(
            BroadcastChannel, "send", return_value=0
        )
        lrt.optimizer_config['p'] = p
        # Unknown encryption / unsupported p must raise; otherwise fit runs.
        if encryption_method not in ("ckks", "paillier", "plain"):
            msg = f"Encryption method {encryption_method} not supported! Valid methods are 'paillier', 'ckks', 'plain'."
            with pytest.raises(ValueError) as e:
                lrt.fit()
            exec_msg = e.value.args[0]
            assert exec_msg == msg
        elif p not in (0, 1, 2):
            with pytest.raises(NotImplementedError) as e:
                lrt.fit()
            exec_msg = e.value.args[0]
            assert exec_msg == "Regular P={} not implement.".format(p)
        else:
            lrt.fit()
        self.check_model_output()
    @pytest.mark.parametrize("encryption_method", ["ckks"])
    def test_early_stopping(self, get_label_trainer_conf, get_trainer_conf, encryption_method, mocker):
        """Early-stopping test: with patience=1 and delta=1e-3 the label
        trainer's fit() should terminate early; the passive trainer's fit()
        is then driven with matching mocked channel traffic.
        """
        mocker.patch("service.fed_control._send_progress")
        # Tighten the early-stopping criterion so it triggers quickly.
        get_label_trainer_conf["train_info"]["train_params"]["early_stopping"]["patience"] = 1
        get_label_trainer_conf["train_info"]["train_params"]["early_stopping"]["delta"] = 1e-3
        # Outgoing channel traffic is a no-op in the unit test.
        mocker.patch.object(
            BroadcastChannel, "broadcast", return_value=0
        )
        mocker.patch.object(
            BroadcastChannel, "scatter", return_value=0
        )
        # Fixed two-party topology: node-1 holds labels, node-2 is passive.
        mocker.patch.object(
            service.fed_config.FedConfig, "get_label_trainer", return_value=["node-1"]
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_trainer", return_value=["node-2"]
        )
        lrt = VerticalLogisticRegressionLabelTrainer(get_label_trainer_conf)
        encryption_config = lrt.encryption_config
        # Private CKKS context built from the trainer's encryption settings.
        private_context = ts.context(
            ts.SCHEME_TYPE.CKKS,
            poly_modulus_degree=encryption_config["ckks"]["poly_modulus_degree"],
            coeff_mod_bit_sizes=encryption_config["ckks"]["coeff_mod_bit_sizes"]
        )
        private_context.generate_galois_keys()
        private_context.generate_relin_keys()
        private_context.global_scale = 1 << encryption_config["ckks"]["global_scale_bit_size"]
        # Public-only serialized copy, as a peer would receive it.
        serialized_public_context = private_context.serialize(
            save_public_key=True,
            save_secret_key=False,
            save_galois_keys=True,
            save_relin_keys=True
        )
        public_context = ts.context_from(serialized_public_context)
        def mock_collect(*args, **kwargs):
            # Fake BroadcastChannel.collect, cycling on the call count:
            # %3==1 -> zero tensor (80, 1); %3==2 -> serialized CKKS-encrypted
            # 2-element vector; otherwise zero tensor (20, 1). From the 8th
            # call on an empty list is returned so fit() can finish.
            if encryption_method == "ckks":
                print(mock_channel_collect.call_count)
                if mock_channel_collect.call_count >= 8:
                    return []
                if mock_channel_collect.call_count % 3 == 1:
                    return [torch.tensor(np.zeros([80, 1]))]
                elif mock_channel_collect.call_count % 3 == 2:
                    pred_residual = torch.tensor(np.random.random(2))
                    enc_pred_residual = ts.ckks_vector(
                        private_context, pred_residual.numpy().flatten())
                    serialized_enc_pred_residual = enc_pred_residual.serialize()
                    # Round-trip through the public context, mimicking what a
                    # real peer would have encrypted.
                    pred_residual = ts.ckks_vector_from(
                        public_context, serialized_enc_pred_residual)
                    return [pred_residual.serialize()]
                else:
                    return [torch.tensor(np.zeros([20, 1]))]
            elif encryption_method == "paillier":
                return []
        mock_channel_collect = mocker.patch.object(
            BroadcastChannel, "collect", side_effect=mock_collect
        )
        mocker.patch.object(
            lrt, "check_data", return_value=None
        )
        lrt.fit()
        def mock_func(*args, **kwargs):
            # Passive-trainer recv: first the synced config (config_sync is
            # defined elsewhere in this module, not visible here), then the
            # sample count, afterwards 0.
            if mock_broadcast_recv.call_count == 1:
                return copy.deepcopy(config_sync)
            elif mock_broadcast_recv.call_count == 2:
                return 50
            else:
                return 0
        mock_broadcast_recv = mocker.patch.object(
            BroadcastChannel, "recv", side_effect=mock_func
        )
        trainer = VerticalLogisticRegressionTrainer(get_trainer_conf)
        encryption_config = trainer.encryption_config
        # Fresh private context on the passive side.
        private_context = ts.context(
            ts.SCHEME_TYPE.CKKS,
            poly_modulus_degree=encryption_config["ckks"]["poly_modulus_degree"],
            coeff_mod_bit_sizes=encryption_config["ckks"]["coeff_mod_bit_sizes"]
        )
        private_context.generate_galois_keys()
        private_context.generate_relin_keys()
        private_context.global_scale = 1 << encryption_config["ckks"]["global_scale_bit_size"]
        def mock_predict_residual(*args, **kwargs):
            # Fake recv during the passive fit(): first the serialized public
            # context, then (cycling) a serialized encrypted residual and a
            # plain 2-element array.
            # NOTE(review): the (True, True, 1) tuple presumably carries the
            # label trainer's early-stop decision — confirm against fit().
            if mock_channel_recv.call_count <= 1:
                serialized_public_context = private_context.serialize(
                    save_public_key=True,
                    save_secret_key=False,
                    save_galois_keys=True,
                    save_relin_keys=True
                )
                return serialized_public_context
            elif mock_channel_recv.call_count % 3 == 2:
                pred_residual = torch.tensor(np.random.random(80))
                enc_pred_residual = ts.ckks_vector(
                    private_context, pred_residual.numpy().flatten())
                serialized_enc_pred_residual = enc_pred_residual.serialize()
                return serialized_enc_pred_residual
            elif mock_channel_recv.call_count % 3 == 0:
                return np.random.random(2)
            else:
                return True, True, 1
        mock_channel_recv = mocker.patch.object(
            BroadcastChannel, "recv", side_effect=mock_predict_residual
        )
        mocker.patch.object(
            BroadcastChannel, "send", return_value=0
        )
        trainer.fit()
    @pytest.mark.parametrize("encryption_method", ["ckks"])
    def test_save_frequency(self, get_label_trainer_conf, get_trainer_conf, encryption_method, mocker):
        # Verify that the model save_frequency parameter takes effect:
        # run both sides' fit() with save_frequency=1.
        mocker.patch("service.fed_control._send_progress")
        get_label_trainer_conf["train_info"]["interaction_params"]["save_frequency"] = 1
        get_trainer_conf["train_info"]["interaction_params"] = {}
        get_trainer_conf["train_info"]["interaction_params"]["save_frequency"] = 1
        mocker.patch.object(
            BroadcastChannel, "broadcast", return_value=0
        )
        def mock_collect(*args, **kwargs):
            # Fake BroadcastChannel.collect. private_context/public_context
            # are closure names resolved lazily at call time, after they are
            # created further below. Cycle: %3==1 -> zero tensor (80, 1);
            # %3==2 -> serialized CKKS-encrypted 2-element vector; otherwise
            # zero tensor (20, 1); after the 9th call an empty list.
            if encryption_method == "ckks":
                if mock_channel_collect.call_count > 9:
                    return []
                if mock_channel_collect.call_count % 3 == 1:
                    return [torch.tensor(np.zeros([80, 1]))]
                elif mock_channel_collect.call_count % 3 == 2:
                    pred_residual = torch.tensor(np.random.random(2))
                    enc_pred_residual = ts.ckks_vector(
                        private_context, pred_residual.numpy().flatten())
                    serialized_enc_pred_residual = enc_pred_residual.serialize()
                    pred_residual = ts.ckks_vector_from(
                        public_context, serialized_enc_pred_residual)
                    return [pred_residual.serialize()]
                else:
                    return [torch.tensor(np.zeros([20, 1]))]
            elif encryption_method == "paillier":
                return []
        mock_channel_collect = mocker.patch.object(
            BroadcastChannel, "collect", side_effect=mock_collect
        )
        mocker.patch.object(
            BroadcastChannel, "scatter", return_value=0
        )
        # Fixed two-party topology: node-1 holds labels, node-2 is passive.
        mocker.patch.object(
            service.fed_config.FedConfig, "get_label_trainer", return_value=["node-1"]
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_trainer", return_value=["node-2"]
        )
        lrt = VerticalLogisticRegressionLabelTrainer(get_label_trainer_conf)
        encryption_config = lrt.encryption_config
        # Private CKKS context plus a public-only copy for the fake peer.
        private_context = ts.context(
            ts.SCHEME_TYPE.CKKS,
            poly_modulus_degree=encryption_config["ckks"]["poly_modulus_degree"],
            coeff_mod_bit_sizes=encryption_config["ckks"]["coeff_mod_bit_sizes"]
        )
        private_context.generate_galois_keys()
        private_context.generate_relin_keys()
        private_context.global_scale = 1 << encryption_config["ckks"]["global_scale_bit_size"]
        serialized_public_context = private_context.serialize(
            save_public_key=True,
            save_secret_key=False,
            save_galois_keys=True,
            save_relin_keys=True
        )
        public_context = ts.context_from(serialized_public_context)
        mocker.patch.object(
            lrt, "check_data", return_value=None
        )
        lrt.fit()
        def mock_func(*args, **kwargs):
            # Passive-trainer recv: first the synced config (config_sync is
            # defined elsewhere in this module, not visible here), then the
            # sample count, afterwards 0.
            if mock_broadcast_recv.call_count == 1:
                return config_sync
            elif mock_broadcast_recv.call_count == 2:
                return 50
            else:
                return 0
        mock_broadcast_recv = mocker.patch.object(
            BroadcastChannel, "recv", side_effect=mock_func
        )
        trainer = VerticalLogisticRegressionTrainer(get_trainer_conf)
        encryption_config = trainer.encryption_config
        # Fresh private context on the passive side.
        private_context = ts.context(
            ts.SCHEME_TYPE.CKKS,
            poly_modulus_degree=encryption_config["ckks"]["poly_modulus_degree"],
            coeff_mod_bit_sizes=encryption_config["ckks"]["coeff_mod_bit_sizes"]
        )
        private_context.generate_galois_keys()
        private_context.generate_relin_keys()
        private_context.global_scale = 1 << encryption_config["ckks"]["global_scale_bit_size"]
        def mock_predict_residual(*args, **kwargs):
            # Fake recv during the passive fit(): first the serialized public
            # context, then (cycling) a serialized encrypted residual and a
            # plain 2-element array.
            # NOTE(review): the (False, False, -1) tuple presumably means
            # "do not early-stop" — confirm against fit().
            if mock_channel_recv.call_count <= 1:
                serialized_public_context = private_context.serialize(
                    save_public_key=True,
                    save_secret_key=False,
                    save_galois_keys=True,
                    save_relin_keys=True
                )
                return serialized_public_context
            elif mock_channel_recv.call_count % 3 == 2:
                pred_residual = torch.tensor(np.random.random(80))
                enc_pred_residual = ts.ckks_vector(
                    private_context, pred_residual.numpy().flatten())
                serialized_enc_pred_residual = enc_pred_residual.serialize()
                return serialized_enc_pred_residual
            elif mock_channel_recv.call_count % 3 == 0:
                return np.random.random(2)
            else:
                return False, False, -1
        mock_channel_recv = mocker.patch.object(
            BroadcastChannel, "recv", side_effect=mock_predict_residual
        )
        mocker.patch.object(
            BroadcastChannel, "send", return_value=0
        )
        trainer.fit()
    @pytest.mark.parametrize("encryption_method", ["ckks"])
    def test_save_path(self, get_label_trainer_conf, encryption_method, mocker):
        # If the output directory does not exist, it should be created
        # automatically and the run should complete.
        mocker.patch("service.fed_control._send_progress")
        mocker.patch.object(
            BroadcastChannel, "broadcast", return_value=0
        )
        mocker.patch.object(
            BroadcastChannel, "scatter", return_value=0
        )
        # Point the output at a directory that does not exist yet.
        get_label_trainer_conf["output"]["path"] = "/opt/checkpoints/unit_test_2"
        def mock_collect(*args, **kwargs):
            # Fake BroadcastChannel.collect. private_context/public_context
            # are closure names resolved lazily at call time, after they are
            # created further below. Cycle: %3==1 -> zero tensor (80, 1);
            # %3==2 -> serialized CKKS-encrypted 2-element vector; otherwise
            # zero tensor (20, 1); after the 9th call an empty list.
            if encryption_method == "ckks":
                if mock_channel_collect.call_count > 9:
                    return []
                if mock_channel_collect.call_count % 3 == 1:
                    return [torch.tensor(np.zeros([80, 1]))]
                elif mock_channel_collect.call_count % 3 == 2:
                    pred_residual = torch.tensor(np.random.random(2))
                    enc_pred_residual = ts.ckks_vector(
                        private_context, pred_residual.numpy().flatten())
                    serialized_enc_pred_residual = enc_pred_residual.serialize()
                    pred_residual = ts.ckks_vector_from(
                        public_context, serialized_enc_pred_residual)
                    return [pred_residual.serialize()]
                else:
                    return [torch.tensor(np.zeros([20, 1]))]
            elif encryption_method == "paillier":
                return []
        mock_channel_collect = mocker.patch.object(
            BroadcastChannel, "collect", side_effect=mock_collect
        )
        # Fixed two-party topology: node-1 holds labels, node-2 is passive.
        mocker.patch.object(
            service.fed_config.FedConfig, "get_label_trainer", return_value=["node-1"]
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_trainer", return_value=["node-2"]
        )
        lrt = VerticalLogisticRegressionLabelTrainer(get_label_trainer_conf)
        encryption_config = lrt.encryption_config
        # Private CKKS context plus a public-only copy for the fake peer.
        private_context = ts.context(
            ts.SCHEME_TYPE.CKKS,
            poly_modulus_degree=encryption_config["ckks"]["poly_modulus_degree"],
            coeff_mod_bit_sizes=encryption_config["ckks"]["coeff_mod_bit_sizes"]
        )
        private_context.generate_galois_keys()
        private_context.generate_relin_keys()
        private_context.global_scale = 1 << encryption_config["ckks"]["global_scale_bit_size"]
        serialized_public_context = private_context.serialize(
            save_public_key=True,
            save_secret_key=False,
            save_galois_keys=True,
            save_relin_keys=True
        )
        public_context = ts.context_from(serialized_public_context)
        mocker.patch.object(
            lrt, "check_data", return_value=None
        )
        lrt.fit()
        # Clean up the directory fit() is expected to have created.
        shutil.rmtree("/opt/checkpoints/unit_test_2")
@staticmethod
def check_model_output():
# 检查是否正常输出了model_config.json
assert os.path.exists("/opt/checkpoints/unit_test/model_config.json")
with open("/opt/checkpoints/unit_test/model_config.json") as f:
model_config = json.load(f)
# 检查model_config.json的stage是否符合预期
assert model_config[-1]["class_name"] == "VerticalLogisticRegression"
filename = "/opt/checkpoints/unit_test/" + model_config[0]["filename"][:-5] +'.pmodel'
dim = model_config[-1]["input_dim"]
bias = model_config[-1]["bias"]
if bias:
assert dim == 3
else:
assert dim == 2
# 检查是否写出了模型文件,模型文件是否合法
assert os.path.exists(filename)
with open(filename, 'rb') as f:
byte_str = f.read()
m = LinearModel()
m.ParseFromString(byte_str)
model = json_format.MessageToDict(m,
including_default_value_fields=True,
preserving_proto_field_name=True)
assert len(model["state_dict"]["weight"]) == dim
if bias:
assert "bias" in model["state_dict"]
else:
assert model["state_dict"].get("bias", 0.0) == 0.0
| 30,427 | 39.952894 | 120 | py |
XFL | XFL-master/test/algorithm/framework/vertical/test_sampler.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import json
import os
import shutil
import numpy as np
import pandas as pd
import pytest
import service.fed_config
from algorithm.framework.vertical.sampler.label_trainer import VerticalSamplerLabelTrainer
from algorithm.framework.vertical.sampler.trainer import VerticalSamplerTrainer
from common.communication.gRPC.python.channel import BroadcastChannel
from common.communication.gRPC.python.commu import Commu
@pytest.fixture(scope="module", autouse=True)
def env():
    """Module-scoped fixture: configure the fake federation topology, write
    the guest/host CSV datasets used by the sampler tests, and remove all
    generated files afterwards.
    """
    # Fake federation topology for the communication layer.
    Commu.node_id = "node-1"
    Commu.trainer_ids = ['node-1', 'node-2']
    Commu.scheduler_id = 'assist_trainer'
    # Prepare directories (exist_ok avoids the check-then-create race).
    os.makedirs("/opt/dataset/unit_test", exist_ok=True)
    os.makedirs("/opt/checkpoints/unit_test", exist_ok=True)
    # Test data: 1000 rows; label y is 1 when x0 + x2 + x3 exceeds 2.5.
    case_df = pd.DataFrame({
        'x0': np.random.random(1000),
        'x1': [0] * 1000,
        'x2': 2 * np.random.random(1000) + 1.0,
        'x3': 3 * np.random.random(1000) - 1.0,
        'x4': np.random.random(1000)
    })
    case_df['y'] = np.where(
        case_df['x0'] + case_df['x2'] + case_df['x3'] > 2.5, 1, 0)
    # Guest (label) side gets y/x0/x1/x2; host side gets x3/x4.
    case_df[['y', 'x0', 'x1', 'x2']].to_csv(
        "/opt/dataset/unit_test/guest.csv", index=True, index_label='id')
    case_df[['x3', 'x4']].to_csv(
        "/opt/dataset/unit_test/host.csv", index=True, index_label='id')
    yield
    # Remove generated test data.
    if os.path.exists("/opt/dataset/unit_test"):
        shutil.rmtree("/opt/dataset/unit_test")
    if os.path.exists("/opt/checkpoints/unit_test"):
        shutil.rmtree("/opt/checkpoints/unit_test")
@pytest.fixture()
def get_label_trainer_conf():
    """Label-trainer config: the shipped template with dataset/output paths
    rewritten to the unit-test locations (guest.csv with ids)."""
    with open("python/algorithm/config/vertical_sampler/label_trainer.json") as cfg_file:
        conf = json.load(cfg_file)
    dataset_conf = conf["input"]["dataset"][0]
    dataset_conf["path"] = "/opt/dataset/unit_test"
    dataset_conf["name"] = "guest.csv"
    dataset_conf["has_id"] = True
    conf["output"]["path"] = "/opt/checkpoints/unit_test"
    yield conf
@pytest.fixture()
def get_trainer_conf():
    """Passive-trainer config: the shipped template with dataset/output paths
    rewritten to the unit-test locations (host.csv with ids)."""
    with open("python/algorithm/config/vertical_sampler/trainer.json") as cfg_file:
        conf = json.load(cfg_file)
    dataset_conf = conf["input"]["dataset"][0]
    dataset_conf["path"] = "/opt/dataset/unit_test"
    dataset_conf["name"] = "host.csv"
    dataset_conf["has_id"] = True
    conf["output"]["path"] = "/opt/checkpoints/unit_test"
    yield conf
class TestVerticalSampler:
    """Unit tests for the vertical sampler (label trainer and passive trainer)."""

    @pytest.mark.parametrize('datatype, fraction', [("csv", {"percentage": 1}), ("json", {"percentage": 1}),
                                                    ("csv", {"wrong_key": 1})])
    def test_label_trainer_default(self, get_label_trainer_conf, datatype, fraction, mocker):
        """Default flow: a csv dataset with percentage=1 runs end-to-end and
        writes the sampled outputs; unsupported dataset types, fraction keys
        and a missing dataset config raise NotImplementedError.
        """
        mocker.patch("service.fed_control._send_progress")
        # Channel traffic and topology are mocked out for the unit test.
        mocker.patch.object(
            BroadcastChannel, "broadcast", return_value=0
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_label_trainer", return_value=["node-1"]
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_trainer", return_value=["node-2"]
        )
        conf = copy.deepcopy(get_label_trainer_conf)
        conf["train_info"]["train_params"]["marketing_specified"] = {}
        conf["input"]["dataset"][0]["type"] = datatype
        conf["train_info"]["train_params"]["fraction"] = fraction
        if datatype == "csv":
            if fraction == {"percentage": 1}:
                ls = VerticalSamplerLabelTrainer(conf)
                assert len(ls.data) == 1000
                ls.fit()
                assert os.path.exists(
                    "/opt/checkpoints/unit_test/temp/sampled_data_[STAGE_ID].csv")
                if ls.save_id:
                    assert os.path.exists(
                        "/opt/checkpoints/unit_test/temp/sampled_id_[STAGE_ID].json")
            elif fraction == {"wrong_key": 1}:
                with pytest.raises(NotImplementedError) as e:
                    ls = VerticalSamplerLabelTrainer(conf)
                    ls.fit()
                exec_msg = e.value.args[0]
                assert exec_msg == "Fraction key {} is not supported.".format(
                    list(ls.fraction.keys())[0])
        else:
            with pytest.raises(NotImplementedError) as e:
                ls = VerticalSamplerLabelTrainer(conf)
            exec_msg = e.value.args[0]
            # Fix: the constructor raises before `ls` is bound, so read the
            # dataset type from `conf` (the original `ls.input[...]` would
            # raise NameError here).
            assert exec_msg == "Dataset type {} is not supported.".format(
                conf["input"]["dataset"][0]["type"])
        # test more than one data config
        conf1 = copy.deepcopy(get_label_trainer_conf)
        conf1["input"]["dataset"] = [{}, {}]
        try:
            VerticalSamplerLabelTrainer(conf1)
        except Exception:
            # Best-effort: constructing with two dataset entries may fail for
            # unrelated reasons; only the no-dataset case below is asserted.
            pass
        # test no data config
        conf2 = copy.deepcopy(get_label_trainer_conf)
        conf2["input"]["dataset"] = []
        with pytest.raises(NotImplementedError) as e:
            ls = VerticalSamplerLabelTrainer(conf2)
        exec_msg = e.value.args[0]
        assert exec_msg == "Dataset was not configured."

    @pytest.mark.parametrize('method, strategy, fraction, infer_params', [
        ("random", "downsample", {"percentage": 0.1}, {
            "threshold_method": "percentage", "threshold": 0.1}),
        ("random", "downsample", {"number": 10}, {
        }), ("random", "upsample", {"percentage": 1.1}, {}),
        ("random", "downsample", {"percentage": 1.2}, {
        }), ("random", "upsample", {"percentage": -0.1}, {}),
        ("random", "sample", {"percentage": 0.1}, {}), ("stratify", "downsample",
                                                        {"labeled_percentage": [[0, 0.1], [1, 0.2]]}, {}),
        ("stratify", "upsample", {
            "labeled_percentage": [[0, 1.1], [1, 1.2]]}, {}),
        ("stratify", "sample", {"percentage": 0.1}, {}),
        ("try", "downsample", {"percentage": 0.1}, {})
    ])
    def test_label_trainer_fit(self, get_label_trainer_conf, method, strategy, fraction, mocker, infer_params):
        """Exercise fit() across sampling methods/strategies: random and
        stratified up/down-sampling, marketing-specified thresholds, and the
        error paths for unsupported strategies and methods.
        """
        mocker.patch("service.fed_control._send_progress")
        mocker.patch.object(
            BroadcastChannel, "broadcast", return_value=0
        )
        conf = copy.deepcopy(get_label_trainer_conf)
        conf["train_info"]["train_params"]["method"] = method
        conf["train_info"]["train_params"]["strategy"] = strategy
        conf["train_info"]["train_params"]["fraction"] = fraction
        conf["train_info"]["train_params"]["marketing_specified"] = infer_params
        mocker.patch.object(
            service.fed_config.FedConfig, "get_label_trainer", return_value=["node-1"]
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_trainer", return_value=["node-2"]
        )
        ls = VerticalSamplerLabelTrainer(conf)
        if method == "random" and infer_params == {}:
            if strategy == 'downsample' and fraction == {"percentage": 0.1}:
                ls.fit()
                # Fix: `fraction` is a dict; multiply by its percentage value
                # (the original `int(fraction * 1000)` would raise TypeError).
                assert len(ls.sample_ids) == int(fraction["percentage"] * 1000)
            elif strategy == 'downsample' and fraction == {"number": 10}:
                ls.fit()
                assert len(ls.sample_ids) == 10
            elif strategy == 'upsample' and fraction == {"percentage": 1.1}:
                ls.fit()
                assert len(ls.sample_ids) == int(fraction["percentage"] * 1000)
                # Upsampling above 100% necessarily repeats some ids.
                assert len(set(ls.sample_ids)) < len(ls.sample_ids)
            elif strategy == 'downsample' and fraction == {"percentage": 1.2}:
                # Fix: compare against `fraction` — the original bare dict
                # literal (`and {"percentage": 1.2}:`) was always truthy.
                with pytest.raises(ValueError) as e:
                    ls.fit()
                exec_msg = e.value.args[0]
                assert exec_msg == "Fraction should be a numeric number between 0 and 1"
            elif strategy == 'upsample' and fraction == {"percentage": -0.1}:
                with pytest.raises(ValueError) as e:
                    ls.fit()
                exec_msg = e.value.args[0]
                assert exec_msg == "Fraction should be a numeric number larger than 0"
            else:
                with pytest.raises(NotImplementedError) as e:
                    ls.fit()
                exec_msg = e.value.args[0]
                assert exec_msg == "Strategy type {} is not supported.".format(
                    ls.strategy)
        elif method == "random" and infer_params != {}:
            # Marketing-specified sampling operates on the label column only.
            ls.data = ls.data[["y"]]
            ls.fit()
            assert len(ls.sample_ids) == int(len(ls.data) * ls.threshold)
            # threshold_method "number": exactly `threshold` ids are sampled.
            conf1 = copy.deepcopy(conf)
            conf1["train_info"]["train_params"]["marketing_specified"]["threshold_method"] = "number"
            conf1["train_info"]["train_params"]["marketing_specified"]["threshold"] = 100
            ls1 = VerticalSamplerLabelTrainer(conf1)
            ls1.data = ls1.data[["y"]]
            ls1.fit()
            assert len(ls1.sample_ids) == 100
            # A number threshold larger than the data size must overflow.
            conf1_1 = copy.deepcopy(conf1)
            conf1_1["train_info"]["train_params"]["marketing_specified"]["threshold"] = 10000
            ls1_1 = VerticalSamplerLabelTrainer(conf1_1)
            ls1_1.data = ls1_1.data[["y"]]
            with pytest.raises(OverflowError) as e:
                ls1_1.fit()
            exec_msg = e.value.args[0]
            assert exec_msg == "Threshold number {} is larger than input data size.".format(
                ls1_1.threshold)
            # threshold_method "score" with 0.5 on a 0/1 label selects
            # exactly the positive rows.
            conf2 = copy.deepcopy(conf)
            conf2["train_info"]["train_params"]["marketing_specified"]["threshold_method"] = "score"
            conf2["train_info"]["train_params"]["marketing_specified"]["threshold"] = 0.5
            ls2 = VerticalSamplerLabelTrainer(conf2)
            ls2.data = ls2.data[["y"]]
            ls2.fit()
            assert len(ls2.sample_ids) == np.sum(ls2.data)[0]
        elif method == "stratify" and infer_params == {}:
            if strategy == 'downsample' and fraction == {"labeled_percentage": [[0, 0.1], [1, 0.2]]}:
                ls.fit()
                assert len(ls.sample_ids) == int(
                    ls.label_count[0] * 0.1) + int(ls.label_count[1] * 0.2)
            elif strategy == 'upsample' and fraction == {"labeled_percentage": [[0, 1.1], [1, 1.2]]}:
                ls.fit()
                assert len(ls.sample_ids) == int(
                    ls.label_count[0] * 1.1) + int(ls.label_count[1] * 1.2)
                assert len(set(ls.sample_ids)) < len(ls.sample_ids)
            else:
                with pytest.raises(NotImplementedError) as e:
                    ls.fit()
                exec_msg = e.value.args[0]
                assert exec_msg == "Strategy type {} is not supported.".format(
                    ls.strategy)
        else:
            with pytest.raises(NotImplementedError) as e:
                ls.fit()
            exec_msg = e.value.args[0]
            assert exec_msg == "Method type {} is not supported.".format(
                ls.method)

    def test_trainer(self, get_label_trainer_conf, mocker):
        """Passive trainer: recv() is mocked to return the id subset the
        label trainer would send; fit() must consume it without error.
        """
        conf = copy.deepcopy(get_label_trainer_conf)
        mocker.patch.object(
            service.fed_config.FedConfig, "get_label_trainer", return_value=["node-1"]
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_trainer", return_value=["node-2"]
        )
        ls = VerticalSamplerTrainer(conf)

        def mock_recv(*args, **kwargs):
            # Return the first fraction*len(data) ids, mimicking the sampled
            # id list sent by the label trainer.
            num = int(conf["train_info"]["train_params"]
                      ["fraction"]["percentage"] * len(ls.data))
            return ls.data.index[:num]
        mocker.patch.object(
            BroadcastChannel, "recv", side_effect=mock_recv
        )
        ls.fit()
| 12,300 | 43.568841 | 111 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.