repo | file | code | file_length | avg_line_length | max_line_length | extension_type |
|---|---|---|---|---|---|---|
XFL | XFL-master/python/algorithm/core/data_io.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json
import pandas as pd
import numpy as np
import torch
from torch.utils.data import Dataset
class CsvReader(object):
def __init__(self,
path: str,
has_id: bool = True,
has_label: bool = True):
index_col = 0 if has_id else False
self.table: pd.DataFrame = pd.read_csv(path, index_col=index_col)
self.ids = self.table.index.to_numpy()
self.table.reset_index(drop=True, inplace=True)
self.has_id = has_id
self.has_label = has_label
self.label_col = 0 if has_label else -1
def features(self, type: str = "numpy.ndarray"):
if type == "numpy.ndarray":
return self.table.iloc[:, self.label_col + 1:].to_numpy().astype(np.float32)
else: # pandas.dataframe
return self.table.iloc[:, self.label_col + 1:]
def label(self, type: str = "numpy.ndarray"):
if self.label_col == 0:
if type == "numpy.ndarray":
return self.table.iloc[:, 0].to_numpy().astype(np.float32)
else:
return self.table.iloc[:, 0]
else:
return None
def col_names(self):
return self.table.columns.tolist()
def feature_names(self):
index = 0
if self.has_label:
index += 1
return self.table.columns.tolist()[index:]
def label_name(self):
if self.label_col == 0:
return self.table.columns.tolist()[0]
else:
return None
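# A minimal usage sketch ("train.csv" is a hypothetical file whose first
# column is an id and whose second column is the label):
#   reader = CsvReader("train.csv", has_id=True, has_label=True)
#   x = reader.features()   # float32 ndarray of the feature columns
#   y = reader.label()      # float32 ndarray of the label column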
class NpzReader(object):
def __init__(self,
path: str):
self.data = np.load(path, allow_pickle=True)
def features(self):
return self.data["data"].astype(float)
def label(self):
return self.data["labels"].astype(float)
class NdarrayIterator():
def __init__(self, data: np.ndarray, batch_size: int):
self.data = data
self.bs = batch_size
self.index = 0
def __len__(self):
return len(self.data)
def __iter__(self):
return self
def __next__(self):
if self.index < len(self.data):
data = self.data[self.index: self.index + self.bs]
self.index += self.bs
return data
else:
self.index = 0
raise StopIteration
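# A minimal usage sketch: batches of size 2 over 5 items come out with
# lengths 2, 2 and 1, then the iterator resets its cursor and stops.
#   it = NdarrayIterator(np.arange(5), batch_size=2)
#   [batch.tolist() for batch in it]  # [[0, 1], [2, 3], [4]]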
class QADataset(Dataset):
def __init__(self,
file_name_or_path,
tokenizer,
max_src_length=200,
max_dst_length=500,
prompt_pattern="{}:\n问:{}\n答:",
key_query='input',
key_answer='output'):
super().__init__()
        data = []
        def _load_file(path):
            # Helper shared by the directory and single-file branches.
            with open(path, 'r') as fp:
                content = json.load(fp)
            instruction = content["instruction"]
            for item in content["instances"]:
                data.append(
                    {
                        "Q": prompt_pattern.format(instruction, item[key_query]),
                        "A": item[key_answer]
                    }
                )
        if os.path.isdir(file_name_or_path):
            for file_name in os.listdir(file_name_or_path):
                _load_file(os.path.join(file_name_or_path, file_name))
        elif os.path.isfile(file_name_or_path):
            _load_file(file_name_or_path)
        else:
            raise ValueError(f"Dataset path {file_name_or_path} is not a dir or a file name.")
self.data = data
self.tokenizer = tokenizer
self.max_src_length = max_src_length
self.max_dst_length = max_dst_length
self.key_query = key_query
self.key_answer = key_answer
def __len__(self):
return len(self.data)
def __getitem__(self, index):
query, answer = self.data[index]["Q"], self.data[index]["A"]
src_ids = self.tokenizer.encode(text=query, max_length=self.max_src_length, truncation=True)
dst_ids = self.tokenizer.encode(text=answer, max_length=self.max_dst_length, truncation=True, add_special_tokens=False)
input_ids = src_ids + dst_ids + [self.tokenizer.eos_token_id]
labels = [-100] * len(src_ids) + dst_ids + [self.tokenizer.eos_token_id]
return {"input_ids": input_ids, "labels": labels}
def collate_fn_for_qa(batch, pad_token_id):
    # Pad a batch of QADataset samples to the batch maximum length. Labels
    # are padded with -100 so padded positions are ignored by the loss;
    # pad_token_id is expected to be tokenizer.pad_token_id.
    max_len = max(len(obj["input_ids"]) for obj in batch)
    input_ids, labels = [], []
    for obj in batch:
        pad_len = max_len - len(obj["input_ids"])
        input_ids.append(obj["input_ids"] + [pad_token_id] * pad_len)
        labels.append(obj["labels"] + [-100] * pad_len)
    return {
        "input_ids": torch.tensor(input_ids, dtype=torch.long),
        "labels": torch.tensor(labels, dtype=torch.long)
    } | 13,829 | 36.177419 | 127 | py |
XFL | XFL-master/python/algorithm/core/metrics.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import sys
import numpy as np
from sklearn import metrics as sklearn_metrics
from common.xregister import xregister
from common.utils.auto_descriptor.torch.metrics import metrics
metric_dict = {
# "accuracy": "accuracy_score",
"acc": "accuracy_score",
"precision": "precision_score",
"recall": "recall_score",
"auc": "roc_auc_score",
"mape": "mean_absolute_percentage_error",
"mse": "mean_squared_error",
"mae": "mean_absolute_error",
"r2": "r2_score",
"median_ae": "median_absolute_error",
"rmse": "root_mean_squared_error"
}
def get_metric(name: str):
    name = metric_dict.get(name, name)
if name in dir(sklearn_metrics):
metric = getattr(sklearn_metrics, name)
elif name in dir(sys.modules[__name__]):
metric = getattr(sys.modules[__name__], name)
elif name in xregister.registered_object:
metric = xregister(name)
else:
raise ValueError(f"Metric {name} is not defined.")
return metric
def ks(y_true, y_pred):
    fpr, tpr, _ = sklearn_metrics.roc_curve(y_true, y_pred)
    return max(np.max(tpr - fpr), 0)
def root_mean_squared_error(y_true, y_pred):
mse_value = sklearn_metrics.mean_squared_error(y_true, y_pred)
rmse_value = math.sqrt(mse_value)
return rmse_value
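# A minimal usage sketch: names resolve first through the alias table, then
# sklearn.metrics, then this module, and finally xregister.
#   auc = get_metric("auc")       # -> sklearn.metrics.roc_auc_score
#   rmse = get_metric("rmse")     # -> root_mean_squared_error above
#   rmse([0.0, 1.0], [0.5, 0.5])  # sqrt(mean squared error) = 0.5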
| 2,348 | 29.506494 | 74 | py |
XFL | XFL-master/python/algorithm/core/__init__.py | 0 | 0 | 0 | py | |
XFL | XFL-master/python/algorithm/core/output.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Optional
class TableSaver():
def __init__(self, path: str, name: Optional[str] = None):
if name is None:
splitted_path = path.split("/")
self.path = '/'.join(splitted_path[:-1])
self.name = splitted_path[-1]
else:
self.path = path
self.name = name
def save(self,
epoch: int,
data: dict,
prefix: Optional[str] = None,
suffix: Optional[str] = None,
append: bool = True):
name = ['.'.join(self.name.split('.')[:-1])]
f_ext = self.name.split('.')[-1]
if prefix is not None:
name = [prefix] + name
if suffix is not None:
name = name + [suffix]
name = '.'.join(['_'.join(name), f_ext])
output_path = os.path.join(self.path, name)
        mode = 'a' if append else 'w'
        # Write the header when creating a new file or overwriting an existing one.
        write_header = (mode == 'w') or not os.path.exists(output_path)
        with open(output_path, mode) as f:
            if write_header:
                f.write("%s,%s\n" % ("epoch", ','.join(data.keys())))
            features = ["%.6g" % data[k] for k in data]
            f.write("%d,%s\n" % (epoch, ','.join(features)))
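# A minimal usage sketch (the path is hypothetical):
#   saver = TableSaver("/tmp/out/metrics.csv")
#   saver.save(epoch=1, data={"acc": 0.91, "loss": 0.234})
#   saver.save(epoch=1, data={"acc": 0.88}, suffix="val")  # -> metrics_val.csv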
| 2,184 | 33.68254 | 75 | py |
XFL | XFL-master/python/algorithm/core/lr_scheduler/jax_lr_scheduler.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import optax
from common.xregister import xregister
def get_lr_scheduler(name: str):
if name in dir(optax):
scheduler = getattr(optax, name)
elif name in dir(sys.modules[__name__]):
scheduler = getattr(sys.modules[__name__], name)
elif name in xregister.registered_object:
scheduler = xregister(name)
else:
raise ValueError(f"Scheduler {name} is not supported in jax.")
return scheduler | 1,057 | 34.266667 | 74 | py |
XFL | XFL-master/python/algorithm/core/lr_scheduler/paddle_lr_scheduler.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import paddle.optimizer.lr as paddle_lr_scheduler
from common.xregister import xregister
def get_lr_scheduler(name: str):
if name in dir(paddle_lr_scheduler):
scheduler = getattr(paddle_lr_scheduler, name)
elif name in dir(sys.modules[__name__]):
scheduler = getattr(sys.modules[__name__], name)
elif name in xregister.registered_object:
scheduler = xregister(name)
else:
raise ValueError(f"Scheduler {name} is not supported in torch.")
return scheduler | 1,124 | 36.5 | 74 | py |
XFL | XFL-master/python/algorithm/core/lr_scheduler/torch_lr_scheduler.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import torch.optim.lr_scheduler as lr_scheduler
from common.xregister import xregister
def get_lr_scheduler(name: str):
if name in dir(lr_scheduler):
scheduler = getattr(lr_scheduler, name)
elif name in dir(sys.modules[__name__]):
scheduler = getattr(sys.modules[__name__], name)
elif name in xregister.registered_object:
scheduler = xregister(name)
else:
raise ValueError(f"Scheduler {name} is not supported in torch.")
return scheduler | 1,108 | 35.966667 | 74 | py |
XFL | XFL-master/python/algorithm/core/lr_scheduler/tf_lr_scheduler.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 605 | 39.4 | 74 | py |
XFL | XFL-master/python/algorithm/core/horizontal/__init__.py | 0 | 0 | 0 | py | |
XFL | XFL-master/python/algorithm/core/horizontal/aggregation/aggregation_plain.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, OrderedDict, Tuple
import torch
import numpy as np
from service.fed_config import FedConfig
from .aggregation_base import AggregationRootBase, AggregationLeafBase
class AggregationPlainLeaf(AggregationLeafBase):
def __init__(self, sec_conf: dict, root_id: str = '', leaf_ids: list[str] = []) -> None:
super().__init__(sec_conf, root_id, FedConfig.node_id)
self.leaf_ids = leaf_ids or FedConfig.get_label_trainer() + FedConfig.get_trainer()
def _calc_upload_value(self, parameters: OrderedDict, parameters_weight: float) -> Tuple[OrderedDict, float]:
def f(x):
y = x[1]
if isinstance(x[1], torch.Tensor):
y = x[1].cpu()
return (x[0], y * parameters_weight)
weighted_parameters = OrderedDict(map(f, parameters.items()))
return (weighted_parameters, parameters_weight)
class AggregationPlainRoot(AggregationRootBase):
def __init__(self, sec_conf: dict, root_id: str = '', leaf_ids: list[str] = []) -> None:
super().__init__(sec_conf, root_id, leaf_ids)
def _calc_aggregated_params(self, received_value: List, average=True) -> OrderedDict:
total_weight = sum([item[1] for item in received_value])
if self.initial_parameters is not None:
parameters = self.initial_parameters
else:
parameters = received_value[0][0]
for k in parameters.keys():
for item in received_value[1:]:
received_value[0][0][k] += item[0][k]
if received_value[0][0][k].dtype in [np.float32, np.float64]:
if average:
received_value[0][0][k] /= total_weight
elif received_value[0][0][k].dtype not in [torch.float32, torch.float64]:
ori_dtype = received_value[0][0][k].dtype
received_value[0][0][k] = received_value[0][0][k].to(dtype=torch.float32)
if average:
received_value[0][0][k] /= total_weight
received_value[0][0][k] = received_value[0][0][k].to(dtype=ori_dtype)
else:
if average:
received_value[0][0][k] /= total_weight
return received_value[0][0]
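# A worked example of the weighting: each leaf uploads (params * weight,
# weight); the root sums both parts and, when average=True, divides by the
# total weight. Two leaves with p1 = 2.0 (weight 3) and p2 = 4.0 (weight 1)
# aggregate to (2.0 * 3 + 4.0 * 1) / (3 + 1) = 2.5.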
| 2,982 | 39.310811 | 113 | py |
XFL | XFL-master/python/algorithm/core/horizontal/aggregation/api.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Union
from .aggregation_otp import AggregationOTPRoot, AggregationOTPLeaf
from .aggregation_plain import AggregationPlainRoot, AggregationPlainLeaf
from service.fed_config import FedConfig
def _get_aggregation_inst(role: str, sec_conf: dict, root_id: str = '', leaf_ids: list[str] = []) -> Union[AggregationPlainLeaf, AggregationPlainRoot]:
    """ Get a proper aggregation instance. role: "leaf" or "root".
    """
if not sec_conf or len(leaf_ids) == 1 or len(FedConfig.get_label_trainer() + FedConfig.get_trainer()) == 1:
method = "plain"
sec_conf = {}
else:
method = list(sec_conf.keys())[0]
sec_conf = sec_conf[method]
opt = {
"otp": {
"leaf": AggregationOTPLeaf,
"root": AggregationOTPRoot
},
"plain": {
"leaf": AggregationPlainLeaf,
"root": AggregationPlainRoot
}
}
    try:
        return opt[method][role](sec_conf, root_id, leaf_ids)
    except KeyError as e:
        raise KeyError("Combination of method {} and role {} is not supported for creating an aggregation instance".format(method, role)) from e
def get_aggregation_root_inst(sec_conf: dict, root_id: str = '', leaf_ids: list[str] = []) -> Union[AggregationPlainRoot, AggregationOTPRoot]:
return _get_aggregation_inst('root', sec_conf, root_id, leaf_ids)
def get_aggregation_leaf_inst(sec_conf: dict, root_id: str = '', leaf_ids: list[str] = []) -> Union[AggregationPlainLeaf, AggregationOTPLeaf]:
return _get_aggregation_inst('leaf', sec_conf, root_id, leaf_ids)
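# A minimal usage sketch (assumes FedConfig has been initialized): the assist
# trainer builds the root side, every trainer builds a leaf side with the same
# security config; an empty sec_conf falls back to plain aggregation.
#   root = get_aggregation_root_inst(sec_conf={})
#   leaf = get_aggregation_leaf_inst(sec_conf={})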
| 2,397 | 38.311475 | 151 | py |
XFL | XFL-master/python/algorithm/core/horizontal/aggregation/__init__.py | 0 | 0 | 0 | py | |
XFL | XFL-master/python/algorithm/core/horizontal/aggregation/aggregation_base.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import math
import pickle
from typing import List, OrderedDict, Tuple, Dict, Optional
from common.communication.gRPC.python.channel import DualChannel
from service.fed_config import FedConfig
from common.crypto.one_time_pad.component import (
OneTimeKey, OneTimePadContext, OneTimePadCiphertext
)
MAX_BLOCK_SIZE = 524288000 # 500M
MOV = b"@" # middle of value
EOV = b"&" # end of value
class AggregationLeafBase(object):
__metaclass__ = abc.ABCMeta
def __init__(self, sec_conf: dict, root_id: str = '', leaf_id: str = '') -> None:
self.sec_conf = sec_conf
if root_id:
self.root_id = root_id
else:
self.root_id = FedConfig.get_assist_trainer()
if leaf_id:
self.leaf_id = leaf_id
else:
self.leaf_id = FedConfig.node_id
self.aggregation_chann = DualChannel(name='aggregation_' + self.leaf_id,
ids=[self.root_id, self.leaf_id])
@abc.abstractmethod
def _calc_upload_value(self, parameters: OrderedDict, parameters_weight: float) -> Tuple[OrderedDict, float]:
pass
def upload(self, parameters: OrderedDict, parameters_weight: float) -> int:
""" Send (parameters * parameters_weight, parameter_weight) to assist_trainer
"""
response_codes = []
value = self._calc_upload_value(parameters, parameters_weight)
pickle_value = pickle.dumps(value)
        for seg in max_bytes_segmentation(pickle_value):
response_code = self.aggregation_chann.send(seg, use_pickle=False)
response_codes.append(response_code)
return int(any(response_codes))
def download(self) -> OrderedDict:
""" Receive global parameters from assist_trainer
"""
# pickle_params = b""
# num_seg = self.seg_chan.recv()
# for n in range(num_seg):
# pickle_params += self.aggregation_chan.recv(use_pickle=False)
# params = pickle.loads(pickle_params)
# return params
# params = self.aggregation_chan.recv()
# return params
pickle_params = bytes()
while True:
recv_value = self.aggregation_chann.recv(use_pickle=False)
pickle_params += recv_value[:-1]
if recv_value[-1] == EOV[0]:
break
elif recv_value[-1] == MOV[0]:
continue
params = pickle.loads(pickle_params)
return params
class AggregationRootBase(object):
__metaclass__ = abc.ABCMeta
def __init__(self, sec_conf: dict, root_id: str = '', leaf_ids: list[str] = []) -> None:
self.sec_conf = sec_conf
self.initial_parameters = None
if root_id:
self.root_id = root_id
else:
self.root_id = FedConfig.get_assist_trainer()
if leaf_ids:
self.leaf_ids = leaf_ids
else:
self.leaf_ids = FedConfig.get_label_trainer() + FedConfig.get_trainer()
self.aggregation_channs: Dict[str, DualChannel] = {}
for id in self.leaf_ids:
self.aggregation_channs[id] = DualChannel(name='aggregation_' + id,
ids=[self.root_id, id])
@abc.abstractmethod
def _calc_aggregated_params(self, received_value: List, average) -> OrderedDict:
pass
def set_initial_params(self, params: OrderedDict) -> None:
self.initial_parameters = params
def aggregate(self,
average: bool = True,
parameters: Optional[OrderedDict] = None,
parameters_weight: Optional[float] = None) -> OrderedDict:
""" receive local gradient/weights from trainer, then calculate average gradient/weights.
"""
        is_continue_flags = [True for party_id in self.aggregation_channs]
        received_values = [bytes() for party_id in self.aggregation_channs]
while True:
for i, id in enumerate(self.leaf_ids):
if not is_continue_flags[i]:
continue
data = self.aggregation_channs[id].recv(use_pickle=False, wait=False)
if data is None:
continue
received_values[i] += data[:-1]
if data[-1] == EOV[0]:
received_values[i] = pickle.loads(received_values[i])
is_continue_flags[i] = False
flag = any(is_continue_flags)
if not flag:
break
if parameters:
received_values.insert(0, (parameters, parameters_weight))
aggregated_params = self._calc_aggregated_params(received_values, average)
return aggregated_params
def broadcast(self, params: OrderedDict) -> int:
br_status = []
pickle_params = pickle.dumps(params)
        for seg in max_bytes_segmentation(pickle_params):
br_codes = []
for id in self.leaf_ids:
br_code = self.aggregation_channs[id].send(seg, use_pickle=False)
br_codes.append(br_code)
br_status.append(any(br_codes))
return int(any(br_status))
def max_bytes_segmentation(value):
    # Split a byte string into MAX_BLOCK_SIZE chunks; intermediate chunks are
    # terminated with MOV, the final chunk with EOV.
    n = math.ceil(1.0 * len(value) / MAX_BLOCK_SIZE)
    for i in range(n):
        if i == n - 1:
            max_segment = value[i * MAX_BLOCK_SIZE: (i + 1) * MAX_BLOCK_SIZE] + EOV
        else:
            max_segment = value[i * MAX_BLOCK_SIZE: (i + 1) * MAX_BLOCK_SIZE] + MOV
        yield max_segment | 10,121 | 40.483607 | 113 | py |
XFL | XFL-master/python/algorithm/core/horizontal/aggregation/aggregation_otp.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
from functools import reduce
from itertools import combinations
from typing import Dict, List, OrderedDict, Tuple
import numpy as np
import torch
from common.communication.gRPC.python.commu import Commu
from common.crypto.csprng.drbg import get_drbg_inst
from common.crypto.csprng.drbg_base import DRBGBase
from common.crypto.key_agreement.diffie_hellman import DiffieHellman
from common.crypto.one_time_pad.component import OneTimePadContext, OneTimeKey, OneTimePadCiphertext
from common.crypto.one_time_pad.one_time_add import OneTimeAdd
from service.fed_config import FedConfig
from .aggregation_base import AggregationRootBase, AggregationLeafBase
# {
# "method": "otp",
# "key_bitlength": 128,
# "data_type": "torch.Tensor",
# "key_exchange": {
# "key_bitlength": 3072,
# "optimized": True
# },
# "csprng": {
# "name": "hmac_drbg",
# "method": "sha512",
# }
# }
def split_bytes(x: bytes, out_shape: Tuple[int]):
if len(out_shape) == 0:
return int.from_bytes(x, 'big')
elif len(out_shape) == 1:
a = len(x) // out_shape[0]
return [int.from_bytes(x[a * i: a * (i + 1)], 'big') for i in range(out_shape[0])]
else:
a = len(x) // out_shape[0]
return [split_bytes(x[a * i: a * (i + 1)], out_shape[1:]) for i in range(out_shape[0])]
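# A worked example: 4 bytes split into shape (2,) give two 2-byte big-endian
# integers.
#   split_bytes(b"\x00\x01\x00\x02", (2,))  # -> [1, 2]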
class AggregationOTPLeaf(AggregationLeafBase):
def __init__(self, sec_conf: dict, root_id: str = '', leaf_ids: list[str] = []) -> None:
super().__init__(sec_conf, root_id, FedConfig.node_id)
self.leaf_ids = leaf_ids or FedConfig.get_label_trainer() + FedConfig.get_trainer()
leaf_pairs = combinations(self.leaf_ids, 2)
# key exchange
key_exchange_conf = sec_conf["key_exchange"]
df_protocols: Dict[str, DiffieHellman] = {}
for _leaf_ids in leaf_pairs:
if Commu.node_id in _leaf_ids:
df_protocol = DiffieHellman(list(_leaf_ids),
key_bitlength=key_exchange_conf['key_bitlength'],
optimized=key_exchange_conf["optimized"],
channel_name="otp_diffie_hellman")
df_protocols[df_protocol.chan.remote_id] = df_protocol
entropys: Dict[str, bytes] = {remote_id: None for remote_id in df_protocols}
        # Exchange keys with every peer in parallel, one thread per pairwise
        # channel (a sequential exchange would also work, just slower).
def func(id):
entropys[id] = df_protocols[id].exchange(out_bytes=True)
thread_list = []
for id in df_protocols:
task = threading.Thread(target=func, args=(id,))
thread_list.append(task)
for task in thread_list:
task.start()
for task in thread_list:
task.join()
# csprng
csprng_conf = sec_conf["csprng"]
self.csprngs: OrderedDict[str, DRBGBase] = OrderedDict()
self.is_addition = []
for remote_id in self.leaf_ids:
if remote_id != Commu.node_id:
self.csprngs[remote_id] = get_drbg_inst(name=csprng_conf["name"],
entropy=entropys[remote_id],
method=csprng_conf["method"],
nonce=b'',
additional_data=b'')
self.is_addition.append(Commu.node_id < remote_id)
# one-time-pad
self.otp_context = OneTimePadContext(modulus_exp=sec_conf["key_bitlength"],
data_type=sec_conf["data_type"])
def _calc_upload_value(self, parameters: OrderedDict, parameters_weight: float) -> Tuple[OrderedDict, float]:
# calculate total number of bytes of weights
def f(t):
return reduce(lambda x, y: x * y, t.shape, 1) * self.otp_context.modulus_exp // 8
num_bytes_array = list(map(f, parameters.values()))
csprng_generators = []
for remote_id in self.csprngs:
generator = self.csprngs[remote_id].generator(num_bytes=num_bytes_array,
additional_data=b'')
csprng_generators.append(generator)
weighted_parameters = OrderedDict()
encrypted_parameters = OrderedDict()
for k, v in parameters.items():
if isinstance(v, torch.Tensor):
v = v.cpu()
weighted_parameters[k] = v * float(parameters_weight)
one_time_key = []
for g in csprng_generators:
x = bytearray(next(g))
y = split_bytes(x, v.shape)
one_time_key.append(np.array(y))
one_time_key = OneTimeKey(one_time_key, self.otp_context.modulus_exp)
encrypted_parameters[k] = OneTimeAdd.encrypt(context_=self.otp_context,
data=weighted_parameters[k],
one_time_key=one_time_key,
is_addition=self.is_addition,
serialized=False)
return (encrypted_parameters, parameters_weight)
class AggregationOTPRoot(AggregationRootBase):
def __init__(self, sec_conf: dict, root_id: str = '', leaf_ids: list[str] = []) -> None:
super().__init__(sec_conf, root_id, leaf_ids)
def _calc_aggregated_params(self, received_value: List, average=True) -> OrderedDict:
total_weight = sum([item[1] for item in received_value])
if self.initial_parameters is not None:
parameters = self.initial_parameters
else:
parameters = received_value[0][0]
        all_cipher = isinstance(list(parameters.values())[0], OneTimePadCiphertext)
idx = 1 if all_cipher else 2
for k in parameters.keys():
for item in received_value[idx:]:
received_value[idx-1][0][k] += item[0][k]
received_value[idx-1][0][k] = received_value[idx-1][0][k].decode()
if average:
if all_cipher:
received_value[0][0][k] /= total_weight
else:
received_value[0][0][k] = (received_value[0][0][k] + received_value[1][0][k]) / total_weight
return received_value[0][0]
| 7,803 | 40.73262 | 113 | py |
XFL | XFL-master/python/algorithm/core/horizontal/template/hooker.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Union
class Hooker(object):
def __init__(self):
self.hooks = {}
self.context = {}
def register_hook(self, place: str, rank: int, func: object, desc: str = ''):
""" register hook.
Args:
place (str): hook place.
rank (int): execute rank with the same hook name.
func (object): function to register.
desc (str, optional): description of the function to register. Defaults to ''.
Raises:
ValueError: when rank of the hook place has been registered.
"""
if place not in self.hooks:
self.hooks[place] = {}
if rank in self.hooks[place]:
raise ValueError(
f"Rank {rank} of hook place {place} has already been registered.")
self.hooks[place][rank] = {}
self.hooks[place][rank]['func'] = func
self.hooks[place][rank]['desc'] = desc
def execute_hook_at(self, place: str) -> int:
""" execute functions registered by the hook place.
Args:
place (str): hook place.
Returns:
int: 1 represent needs break after the hook execution, else 0.
"""
hooks = self.hooks.get(place, {})
for rank in sorted(hooks):
if hooks[rank]['func'](self.context) == 1:
return 1
return 0
    def declare_hooks(self, place: Union[str, list[str]]):
        """ declare hooks.
        Args:
            place (Union[str, list[str]]): hook place(s)
        """
        places = place if isinstance(place, list) else [place]
        for p in places:
            if str(p) not in self.hooks:
                self.hooks[str(p)] = {}
    def declare_context(self, place: Union[str, list[str]]):
        """ declare context place.
        Args:
            place (Union[str, list[str]]): place(s) of context to declare.
        """
        places = place if isinstance(place, list) else [place]
        for p in places:
            if str(p) not in self.context:
                self.context[str(p)] = None
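# A minimal usage sketch: hooks registered at one place run in ascending rank
# order; a hook returning 1 (or True) makes execute_hook_at return 1 so the
# caller can break out of its loop.
#   h = Hooker()
#   h.register_hook("before_epoch", rank=0, func=lambda ctx: print("hi"), desc="demo")
#   h.execute_hook_at("before_epoch")  # prints "hi", returns 0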
| 2,906 | 31.3 | 90 | py |
XFL | XFL-master/python/algorithm/core/horizontal/template/agg_type.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def register_agg_type_for_assist_trainer(trainer: object, framework: str, agg_type: str):
if framework == 'torch':
if agg_type == "fedavg":
from algorithm.core.horizontal.template.torch.fedavg.assist_trainer import FedAvgAssistTrainer
FedAvgAssistTrainer(trainer).register()
elif agg_type == "fedprox":
from algorithm.core.horizontal.template.torch.fedprox.assist_trainer import FedProxAssistTrainer
FedProxAssistTrainer(trainer).register()
elif agg_type == "scaffold":
from algorithm.core.horizontal.template.torch.scaffold.assist_trainer import SCAFFOLDAssistTrainer
SCAFFOLDAssistTrainer(trainer).register()
else:
raise ValueError(f"Aggregation agg_type {agg_type} is not valid. Accepted agg_types are fedavg, fedprox, scaffold.")
elif framework == 'tensorflow':
if agg_type == "fedavg":
from algorithm.core.horizontal.template.tensorflow.fedavg.assist_trainer import FedAvgAssistTrainer
FedAvgAssistTrainer(trainer).register()
else:
raise ValueError(f"Aggregation agg_type {agg_type} is not valid. Accepted agg_types are fedavg.")
elif framework == 'jax':
if agg_type == "fedavg":
from algorithm.core.horizontal.template.jax.fedavg.assist_trainer import FedAvgAssistTrainer
FedAvgAssistTrainer(trainer).register()
else:
raise ValueError(f"Aggregation agg_type {agg_type} is not valid. Accepted agg_types are fedavg.")
def register_agg_type_for_label_trainer(trainer: object, framework: str, agg_type: str):
if framework == 'torch':
if agg_type == "fedavg":
from algorithm.core.horizontal.template.torch.fedavg.label_trainer import FedAvgLabelTrainer
FedAvgLabelTrainer(trainer).register()
elif agg_type == "fedprox":
from algorithm.core.horizontal.template.torch.fedprox.label_trainer import FedProxLabelTrainer
FedProxLabelTrainer(trainer).register()
elif agg_type == "scaffold":
from algorithm.core.horizontal.template.torch.scaffold.label_trainer import SCAFFOLDLabelTrainer
SCAFFOLDLabelTrainer(trainer).register()
else:
raise ValueError(f"Aggregation agg_type {agg_type} is not valid. Accepted agg_types are fedavg, fedprox, scaffold.")
elif framework == 'tensorflow':
if agg_type == "fedavg":
from algorithm.core.horizontal.template.tensorflow.fedavg.label_trainer import FedAvgLabelTrainer
FedAvgLabelTrainer(trainer).register()
else:
raise ValueError(f"Aggregation agg_type {agg_type} is not valid. Accepted agg_types are fedavg.")
elif framework == 'jax':
if agg_type == "fedavg":
from algorithm.core.horizontal.template.jax.fedavg.label_trainer import FedAvgLabelTrainer
FedAvgLabelTrainer(trainer).register()
else:
raise ValueError(f"Aggregation agg_type {agg_type} is not valid. Accepted agg_types are fedavg.")
| 3,704 | 53.485294 | 128 | py |
XFL | XFL-master/python/algorithm/core/horizontal/template/torch/base.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import os
import inspect
from functools import partial
from typing import OrderedDict
import torch.nn as nn
from algorithm.core.horizontal.aggregation.api import get_aggregation_root_inst
from algorithm.core.horizontal.aggregation.api import get_aggregation_leaf_inst
from algorithm.core.loss.torch_loss import get_lossfunc
from algorithm.core.metrics import get_metric
from algorithm.core.optimizer.torch_optimizer import get_optimizer
from algorithm.core.lr_scheduler.torch_lr_scheduler import get_lr_scheduler
from common.utils.config_parser import CommonConfigParser
from common.utils.algo_utils import earlyStoppingH
from common.utils.logger import logger
from common.utils.model_io import ModelIO
from algorithm.core.horizontal.template.hooker import Hooker
class BaseTrainer(Hooker):
def __init__(self, train_conf: dict):
Hooker.__init__(self)
self.common_config = CommonConfigParser(train_conf)
self.device = self.common_config.device
self.earlystopping = earlyStoppingH(
key=self.common_config.early_stopping.get("key", "acc"),
patience=self.common_config.early_stopping.get("patience", -1),
delta=self.common_config.early_stopping.get("delta", 0)
)
self.declare_hooks([
"before_global_epoch", "before_local_epoch", "before_train_loop",
"after_train_loop", "after_local_epoch", "after_global_epoch"
])
self._init_context()
self.model = self._set_model()
self.train_dataloader = self._set_train_dataloader()
self.val_dataloader = self._set_val_dataloader()
self.lossfunc = self._set_lossfunc()
self.optimizer = self._set_optimizer()
self.lr_scheduler = self._set_lr_scheduler(self.optimizer)
self.metrics = self._set_metrics()
self.aggregator = self._set_aggregator(self.common_config.identity)
def _init_context(self):
self.context['g_epoch'] = 0
self.context['l_epoch'] = 0
self.context["config"] = self.common_config.config
self.context["global_epoch_num"] = self.common_config.train_params.get("global_epoch", 0)
self.context["local_epoch_num"] = self.common_config.train_params.get("local_epoch", 0)
self.context["early_stop_flag"] = False
self.context["early_stop_epoch"] = 0
def _set_aggregator(self, party_type: str):
if party_type == "assist_trainer":
aggregator = get_aggregation_root_inst(self.common_config.encryption)
else:
aggregator = get_aggregation_leaf_inst(self.common_config.encryption)
return aggregator
def _set_model(self) -> nn.Module:
raise NotImplementedError("The _set_model method is not implemented.")
def _set_train_dataloader(self):
raise NotImplementedError(
"The _set_train_dataloader method is not implemented.")
def _set_val_dataloader(self):
raise NotImplementedError(
"The _set_val_dataloader method is not implemented.")
def _save_model(self, final: bool, context: dict):
if not os.path.exists(self.common_config.output_dir):
os.makedirs(self.common_config.output_dir)
if final:
if context["early_stop_flag"] & (context["early_stop_epoch"] > 0):
if self.common_config.output_model_name != "":
ModelIO.copy_best_model(
save_dir=self.common_config.output_dir,
model_name=self.common_config.output_model_name,
epoch=context["early_stop_epoch"],
)
if self.common_config.output_onnx_model_name != "":
ModelIO.copy_best_model(
save_dir=self.common_config.output_dir,
model_name=self.common_config.output_onnx_model_name,
epoch=context["early_stop_epoch"],
)
else:
if self.common_config.output_model_name != "":
ModelIO.save_torch_model(
state_dict=self.model.state_dict(),
save_dir=self.common_config.output_dir,
model_name=self.common_config.output_model_name,
)
if self.common_config.output_onnx_model_name != "":
input_dim = self.common_config.model_conf.get("input_dim")
if input_dim is None:
raise ValueError("input_dim is None")
ModelIO.save_torch_onnx(
model=self.model,
input_dim=(input_dim,),
save_dir=self.common_config.output_dir,
model_name=self.common_config.output_onnx_model_name,
)
else:
if self.common_config.save_frequency == -1:
return
if context["g_epoch"] % self.common_config.save_frequency == 0:
if self.common_config.output_model_name != "":
ModelIO.save_torch_model(
state_dict=self.model.state_dict(),
save_dir=self.common_config.output_dir,
model_name=self.common_config.output_model_name,
epoch=context["g_epoch"],
)
if self.common_config.output_onnx_model_name != "":
input_dim = self.common_config.model_conf.get("input_dim")
if input_dim is None:
raise ValueError("input_dim is None")
ModelIO.save_torch_onnx(
model=self.model,
input_dim=(input_dim,),
save_dir=self.common_config.output_dir,
model_name=self.common_config.output_onnx_model_name,
epoch=context["g_epoch"],
)
def _load_model(self, context: dict):
if self.common_config.pretrain_model_path != "":
path = os.path.join(
self.common_config.pretrain_model_path,
self.common_config.pretrain_model_name
)
state_dict = ModelIO.load_torch_model(path, device=self.device)
self.model.load_state_dict(state_dict)
    def _set_optimizer(self):
        """ Define self.optimizer """
        optimizer_conf = OrderedDict(self.common_config.optimizer)
        optimizer = OrderedDict()
        for name, conf in optimizer_conf.items():
            # Keep only the kwargs accepted by the optimizer's signature.
            params = list(inspect.signature(get_optimizer(name)).parameters.values())
            accepted_keys = [param.name for param in params]
            conf = {key: conf[key] for key in conf if key in accepted_keys}
            optimizer[name] = get_optimizer(name)(self.model.parameters(), **conf)
        return optimizer
    def _set_lossfunc(self):
        """ Define self.lossfunc """
        lossfunc_conf = OrderedDict(self.common_config.lossfunc)
        lossfunc = OrderedDict()
        for name, conf in lossfunc_conf.items():
            params = list(inspect.signature(get_lossfunc(name)).parameters.values())
            accepted_keys = [param.name for param in params]
            conf = {key: conf[key] for key in conf if key in accepted_keys}
            lossfunc[name] = get_lossfunc(name)(**conf)
        return lossfunc
    def _set_lr_scheduler(self, optimizer):
        lr_scheduler_conf = OrderedDict(self.common_config.lr_scheduler)
        lr_scheduler = OrderedDict()
        for (name, conf), opt in zip(lr_scheduler_conf.items(), optimizer.values()):
            params = list(inspect.signature(get_lr_scheduler(name)).parameters.values())
            accepted_keys = [param.name for param in params]
            conf = {key: conf[key] for key in conf if key in accepted_keys}
            lr_scheduler[name] = get_lr_scheduler(name)(opt, **conf)
        return lr_scheduler
    def _set_metrics(self):
        """ Define metric """
        metrics = {}
        metrics_conf: dict = self.common_config.metric
        for name, conf in metrics_conf.items():
            params = list(inspect.signature(get_metric(name)).parameters.values())
            accepted_keys = [param.name for param in params]
            conf = {key: conf[key] for key in conf if key in accepted_keys}
            metrics[name] = partial(get_metric(name), **conf)
        return metrics
def _state_dict_to_device(
self, params: OrderedDict, device: str, inline: bool = True) -> OrderedDict:
if not inline:
params = copy.deepcopy(params)
for k, v in params.items():
params[k] = v.to(device)
return params
def train_loop(self):
raise NotImplementedError("The train_loop method is not implemented.")
def fit(self):
global_epoch_num = self.context["global_epoch_num"]
local_epoch_num = self.context["local_epoch_num"]
self.execute_hook_at("before_global_epoch")
for g_epoch in range(1, global_epoch_num + 1):
logger.info(f"global epoch {g_epoch}/{global_epoch_num} start...")
self.context['g_epoch'] = g_epoch
if self.execute_hook_at("before_local_epoch"):
break
for l_epoch in range(1, local_epoch_num + 1):
logger.info(
f"local epoch {l_epoch}/{local_epoch_num} of global epoch {g_epoch} start...")
self.context['l_epoch'] = l_epoch
self.execute_hook_at("before_train_loop")
self.train_loop()
self.execute_hook_at("after_train_loop")
logger.info(
f"local epoch {l_epoch}/{local_epoch_num} of global epoch {g_epoch} finished.")
if self.execute_hook_at("after_local_epoch"):
break
logger.info(f"global epoch {g_epoch}/{global_epoch_num} finished.")
self.execute_hook_at("after_global_epoch")
| 10,692 | 41.264822 | 99 | py |
XFL | XFL-master/python/algorithm/core/horizontal/template/torch/fedtype.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from service.fed_config import FedConfig
def _get_assist_trainer():
    aggregation_config = FedConfig.stage_config["train_info"]["params"]["aggregation_config"]
    agg_type = aggregation_config.get("type")
    if agg_type == "fedprox":
        from algorithm.core.horizontal.template.torch.fedprox.assist_trainer import FedProxAssistTrainer
        return FedProxAssistTrainer
    elif agg_type == "scaffold":
        from algorithm.core.horizontal.template.torch.scaffold.assist_trainer import SCAFFOLDAssistTrainer
        return SCAFFOLDAssistTrainer
    from algorithm.core.horizontal.template.torch.fedavg.assist_trainer import FedAvgAssistTrainer
    return FedAvgAssistTrainer
def _get_label_trainer():
    aggregation_config = FedConfig.stage_config["train_info"]["params"]["aggregation_config"]
    agg_type = aggregation_config.get("type")
    if agg_type == "fedprox":
        from algorithm.core.horizontal.template.torch.fedprox.label_trainer import FedProxLabelTrainer
        return FedProxLabelTrainer
    elif agg_type == "scaffold":
        from algorithm.core.horizontal.template.torch.scaffold.label_trainer import SCAFFOLDLabelTrainer
        return SCAFFOLDLabelTrainer
    from algorithm.core.horizontal.template.torch.fedavg.label_trainer import FedAvgLabelTrainer
    return FedAvgLabelTrainer | 1,913 | 43.511628 | 106 | py |
XFL | XFL-master/python/algorithm/core/horizontal/template/torch/fedavg/label_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from algorithm.core.horizontal.aggregation.aggregation_base import AggregationLeafBase
from ..base import BaseTrainer
class FedAvgLabelTrainer:
def __init__(self, trainer: BaseTrainer):
self.trainer = trainer
def register(self):
self.trainer.register_hook(
place="before_local_epoch", rank=-2,
func=self._sync_early_stop_flag, desc="sync early stop flag"
)
self.trainer.register_hook(
place="before_local_epoch", rank=-1,
func=self._download_model, desc="download global model"
)
self.trainer.register_hook(
place="after_local_epoch", rank=-1,
func=self._upload_model, desc="upload local model"
)
    # A True flag from the root means training has finished; returning it breaks the epoch loop.
def _sync_early_stop_flag(self, context: dict):
aggregator: AggregationLeafBase = self.trainer.aggregator
early_stop_flag = aggregator.download()
assert isinstance(early_stop_flag, bool)
return early_stop_flag
def _download_model(self, context: dict):
aggregator: AggregationLeafBase = self.trainer.aggregator
new_state_dict = aggregator.download()
self.trainer._state_dict_to_device(new_state_dict, self.trainer.device, inline=True)
self.trainer.model.load_state_dict(new_state_dict)
def _upload_model(self, context: dict):
aggregator: AggregationLeafBase = self.trainer.aggregator
if self.trainer.device != "cpu":
state_dict = self.trainer._state_dict_to_device(self.trainer.model.state_dict(), "cpu", inline=False)
else:
state_dict = self.trainer.model.state_dict()
weight = self.trainer.common_config.aggregation.get("weight") or \
len(self.trainer.train_dataloader)
aggregator.upload(state_dict, weight)
| 2,477 | 39.622951 | 113 | py |
XFL | XFL-master/python/algorithm/core/horizontal/template/torch/fedavg/assist_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from algorithm.core.horizontal.aggregation.aggregation_base import AggregationRootBase
from ..base import BaseTrainer
from service.fed_control import ProgressCalculator
class FedAvgAssistTrainer:
def __init__(self, trainer: BaseTrainer):
self.trainer = trainer
self.progress_calculator = ProgressCalculator(trainer.context["global_epoch_num"])
def register(self):
self.trainer.register_hook(
place="before_global_epoch", rank=-1,
func=self.trainer._load_model, desc="load pretrain model"
)
self.trainer.register_hook(
place="after_local_epoch", rank=-2,
func=self._aggregate_model, desc="aggregate local models"
)
self.trainer.register_hook(
place="before_local_epoch", rank=-2,
            func=self._sync_early_stop_flag, desc="sync early stop flag"
)
self.trainer.register_hook(
place="before_local_epoch", rank=-1,
func=self._broadcast_model, desc="broadcast global model"
)
self.trainer.register_hook(
place="after_local_epoch", rank=-1,
func=self.progress_calculator.cal_horizontal_progress,
desc="update progress bar"
)
self.trainer.register_hook(
place="after_global_epoch", rank=-1,
func=self.progress_calculator.finish_progress, desc="update progress bar"
)
def _sync_early_stop_flag(self, context: dict):
aggregator: AggregationRootBase = self.trainer.aggregator
aggregator.broadcast(context["early_stop_flag"])
return context["early_stop_flag"]
def _broadcast_model(self, context: dict):
aggregator: AggregationRootBase = self.trainer.aggregator
aggregator.broadcast(self.trainer.model.state_dict())
def _aggregate_model(self, context: dict):
aggregator: AggregationRootBase = self.trainer.aggregator
new_state_dict = aggregator.aggregate()
if self.trainer.device != "cpu":
self.trainer._state_dict_to_device(new_state_dict, self.trainer.device, inline=True)
self.trainer.model.load_state_dict(new_state_dict)
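# Hedged sketch (ours, not XFL's aggregation internals): aggregate() above is
# expected to return the weighted average of the uploaded state dicts,
# theta = sum_k(w_k * theta_k) / sum_k(w_k). A toy numpy version of that rule:
if __name__ == "__main__":
    import numpy as np
    uploads = [({"w": np.array([1.0, 2.0])}, 1.0),
               ({"w": np.array([3.0, 4.0])}, 3.0)]
    total = sum(w for _, w in uploads)
    avg = {"w": sum(sd["w"] * w for sd, w in uploads) / total}
    print(avg)  # {'w': array([2.5, 3.5])}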
| 2,818 | 40.455882 | 96 | py |
XFL | XFL-master/python/algorithm/core/horizontal/template/torch/fedprox/label_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from typing import OrderedDict
from algorithm.core.loss.torch_loss import get_lossfunc
from ..base import BaseTrainer
from algorithm.core.horizontal.aggregation.aggregation_base import AggregationLeafBase
class FedProxLabelTrainer:
def __init__(self, trainer: BaseTrainer):
self.trainer = trainer
self.mu = self.trainer.common_config.aggregation.get("mu", 0)
def register(self):
self.trainer.register_hook(
place="before_local_epoch", rank=-3,
func=self._sync_early_stop_flag, desc="sync early stop flag"
)
self.trainer.register_hook(
place="before_local_epoch", rank=-2, func=self._download_model,
desc="download global model"
)
self.trainer.register_hook(
place="before_local_epoch", rank=-1, func=self._update_gmodel_params,
desc="Update gmodel param"
)
self.trainer.register_hook(
place="after_local_epoch", rank=-1, func=self._upload_model,
desc="upload local model"
)
    # a return value of True means training has finished
def _sync_early_stop_flag(self, context: dict):
aggregator: AggregationLeafBase = self.trainer.aggregator
early_stop_flag = aggregator.download()
assert isinstance(early_stop_flag, bool)
return early_stop_flag
def _download_model(self, context: dict):
aggregator: AggregationLeafBase = self.trainer.aggregator
new_state_dict = aggregator.download()
self.trainer._state_dict_to_device(new_state_dict, self.trainer.device, inline=True)
self.trainer.model.load_state_dict(new_state_dict)
def _upload_model(self, context: dict):
aggregator: AggregationLeafBase = self.trainer.aggregator
if self.trainer.device != "cpu":
state_dict = self.trainer._state_dict_to_device(
self.trainer.model.state_dict(), "cpu", inline=False
)
else:
state_dict = self.trainer.model.state_dict()
weight = self.trainer.common_config.aggregation.get("weight") or \
len(self.trainer.train_dataloader)
aggregator.upload(state_dict, weight)
def _update_gmodel_params(self, context):
self.gmodel_params = \
[param.data.detach().clone() for param in self.trainer.model.parameters()]
return
def _set_lossfunc(self):
""" Define self.lossfunc """
lossfunc_conf = OrderedDict(self.trainer.common_config.lossfunc)
lossfunc = OrderedDict()
for k, v in lossfunc_conf.items():
lossfunc[k] = self._get_fedprox_loss(k, v)
return lossfunc
def _get_fedprox_loss(self, k, v):
def fedprox_loss(pred, label):
reg = 0.0
for w_prev, w in zip(self.gmodel_params, self.trainer.model.parameters()):
reg += torch.pow(torch.norm(w - w_prev, p='fro'), 2)
loss = get_lossfunc(k)(**v)(pred, label) + self.mu * reg / 2
return loss
return fedprox_loss | 3,714 | 39.380435 | 92 | py |
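# Hedged sketch (ours): the proximal term added by fedprox_loss above is
# (mu / 2) * sum_i ||w_i - w_i^global||_F^2. A minimal standalone check with
# hypothetical tensors:
import torch
w_global = [torch.zeros(2, 2)]
w_local = [torch.ones(2, 2)]
reg = sum(torch.pow(torch.norm(w - g, p='fro'), 2)
          for w, g in zip(w_local, w_global))
assert reg.item() == 4.0  # four entries, each (1 - 0)^2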
XFL | XFL-master/python/algorithm/core/horizontal/template/torch/fedprox/assist_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from algorithm.core.horizontal.aggregation.aggregation_base import AggregationRootBase
from ..base import BaseTrainer
from service.fed_control import ProgressCalculator
class FedProxAssistTrainer:
def __init__(self, trainer: BaseTrainer):
self.trainer = trainer
self.progress_calculator = ProgressCalculator(trainer.context["global_epoch_num"])
def register(self):
self.trainer.register_hook(
place="before_global_epoch", rank=-1,
func=self.trainer._load_model, desc="load pretrain model"
)
self.trainer.register_hook(
place="after_local_epoch", rank=-2,
func=self._aggregate_model, desc="aggregate local models"
)
self.trainer.register_hook(
place="before_local_epoch", rank=-2,
            func=self._sync_early_stop_flag, desc="sync early stop flag"
)
self.trainer.register_hook(
place="before_local_epoch", rank=-1,
func=self._broadcast_model, desc="broadcast global model"
)
self.trainer.register_hook(
place="after_local_epoch", rank=-1,
func=self.progress_calculator.cal_horizontal_progress,
desc="update progress bar"
)
self.trainer.register_hook(
place="after_global_epoch", rank=-1,
func=self.progress_calculator.finish_progress, desc="update progress bar"
)
def _sync_early_stop_flag(self, context: dict):
aggregator: AggregationRootBase = self.trainer.aggregator
aggregator.broadcast(context["early_stop_flag"])
return context["early_stop_flag"]
def _broadcast_model(self, context: dict):
aggregator: AggregationRootBase = self.trainer.aggregator
aggregator.broadcast(self.trainer.model.state_dict())
def _aggregate_model(self, context: dict):
aggregator: AggregationRootBase = self.trainer.aggregator
new_state_dict = aggregator.aggregate()
if self.trainer.device != "cpu":
self.trainer._state_dict_to_device(new_state_dict, self.trainer.device, inline=True)
self.trainer.model.load_state_dict(new_state_dict)
| 2,819 | 40.470588 | 96 | py |
XFL | XFL-master/python/algorithm/core/horizontal/template/torch/scaffold/label_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch import Tensor
from copy import deepcopy
from typing import OrderedDict, List, Optional
from torch.optim.optimizer import Optimizer, required
from ..base import BaseTrainer
from algorithm.core.horizontal.aggregation.aggregation_base import AggregationLeafBase
class SCAFFOLDLabelTrainer:
def __init__(self, trainer: BaseTrainer):
self.trainer = trainer
self.prev_gmodel_params = None
self.gmodel_params = None
self.gmodel_grad = []
self.lmodel_grad = []
def register(self):
self.trainer.register_hook(
place="before_local_epoch", rank=-3,
func=self._sync_early_stop_flag, desc="sync early stop flag"
)
self.trainer.register_hook(
place="before_local_epoch", rank=-2,
func=self._download_model, desc="download global model"
)
self.trainer.register_hook(
place="before_local_epoch", rank=-1,
func=self._update_gmodel_grad, desc="Update gmodel grad"
)
self.trainer.register_hook(
place="after_local_epoch", rank=-2,
func=self._update_lmodel_grad, desc="Update lmodel grad"
)
self.trainer.register_hook(
place="after_local_epoch", rank=-1,
func=self._upload_model, desc="upload local model"
)
self._set_optimizer()
    # a return value of True means training has finished
def _sync_early_stop_flag(self, context: dict):
aggregator: AggregationLeafBase = self.trainer.aggregator
early_stop_flag = aggregator.download()
assert isinstance(early_stop_flag, bool)
return early_stop_flag
def _download_model(self, context: dict):
aggregator: AggregationLeafBase = self.trainer.aggregator
new_state_dict = aggregator.download()
self.trainer._state_dict_to_device(
new_state_dict, self.trainer.device, inline=True)
self.trainer.model.load_state_dict(new_state_dict)
def _upload_model(self, context: dict):
aggregator: AggregationLeafBase = self.trainer.aggregator
if self.trainer.device != "cpu":
state_dict = self.trainer._state_dict_to_device(
self.trainer.model.state_dict(), "cpu", inline=False)
else:
state_dict = self.trainer.model.state_dict()
weight = self.trainer.common_config.aggregation.get("weight") or \
len(self.trainer.train_dataloader)
aggregator.upload(state_dict, weight)
def _update_gmodel_grad(self, context):
self.gmodel_grad.clear()
if self.gmodel_params:
self.prev_gmodel_params = deepcopy(self.gmodel_params)
self.gmodel_params = [p.data.detach().clone()
for p in self.trainer.model.parameters()]
if self.prev_gmodel_params:
for w, prev_w in zip(self.gmodel_params, self.prev_gmodel_params):
self.gmodel_grad.append(w.sub(prev_w))
return
def _update_lmodel_grad(self, context):
if len(self.lmodel_grad) == 0:
for l_w, g_w in zip(self.trainer.model.parameters(), self.gmodel_params):
self.lmodel_grad.append(l_w.sub(g_w))
        else:
            local_params = [p.data.detach() for p in self.trainer.model.parameters()]
            for i in range(len(self.lmodel_grad)):
                self.lmodel_grad[i] += -self.gmodel_grad[i] + \
                    local_params[i] - self.gmodel_params[i]
return
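    # Hedged reading (ours, derived from the code above, not from XFL docs):
    # gmodel_grad tracks the change of the global model between rounds and
    # lmodel_grad accumulates the local-vs-global drift; together they play the
    # role of SCAFFOLD's server/client control variates, and sgdfold consumes
    # their difference scaled by the accumulated learning rate (lr_sum) when
    # applying the correction.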
def _set_optimizer(self):
""" Define self.optimizer """
optimizer_conf = OrderedDict(
self.trainer.common_config.optimizer
)
optimizer = OrderedDict()
for k, v in optimizer_conf.items():
optimizer[k] = SCAFFOLDOptimizer(
self.trainer.model.parameters(), self.gmodel_grad, self.lmodel_grad,
self.trainer.common_config.train_params.get("local_epoch", 0) \
*len(self.trainer.train_dataloader), **v
)
self.trainer.optimizer = optimizer
self.trainer.lr_scheduler = self.trainer._set_lr_scheduler(self.trainer.optimizer)
class SCAFFOLDOptimizer(Optimizer):
def __init__(
self, params, gmodel_grad, lmodel_grad, iter_num, lr=required,
weight_decay=0, maximize=False, momentum=0, dampening=0,
nesterov=False, amsgrad=False
):
if lr is not required and lr < 0.0:
raise ValueError("Invalid learning rate: {}".format(lr))
if weight_decay < 0.0:
raise ValueError(
"Invalid weight_decay value: {}".format(weight_decay))
if momentum < 0.0:
raise ValueError("Invalid momentum value: {}".format(momentum))
        defaults = dict(
            gmodel_grad=gmodel_grad, lmodel_grad=lmodel_grad, iter_num=iter_num,
            lr_history=[], lr_sum=1, lr=lr, weight_decay=weight_decay,
            maximize=maximize, momentum=momentum, dampening=dampening,
            nesterov=nesterov
        )
if nesterov and (momentum <= 0 or dampening != 0):
raise ValueError(
"Nesterov momentum requires a momentum and zero dampening")
super().__init__(params, defaults)
@torch.no_grad()
def step(self):
loss = None
for group in self.param_groups:
params_with_grad = []
d_p_list = []
momentum_buffer_list = []
for p in group['params']:
if p.grad is not None:
params_with_grad.append(p)
d_p_list.append(p.grad)
state = self.state[p]
if 'momentum_buffer' not in state:
momentum_buffer_list.append(None)
else:
momentum_buffer_list.append(state['momentum_buffer'])
sgdfold(
params_with_grad, d_p_list, momentum_buffer_list,
gmodel_grad=group['gmodel_grad'], lmodel_grad=group['lmodel_grad'],
lr_sum=group['lr_sum'], lr=group['lr'], weight_decay=group['weight_decay'],
maximize=group['maximize'], momentum=group['momentum'],
dampening=group['dampening'], nesterov=group['nesterov']
)
group['lr_history'].append(group['lr'])
if len(group['lr_history']) == group['iter_num']:
group['lr_sum'] = sum(group['lr_history'])
group['lr_history'].clear()
# update momentum_buffers in state
for p, momentum_buffer in zip(params_with_grad, momentum_buffer_list):
state = self.state[p]
state['momentum_buffer'] = momentum_buffer
return loss
def sgdfold(
params: List[Tensor], d_p_list: List[Tensor],
momentum_buffer_list: List[Optional[Tensor]],
gmodel_grad: List[Tensor], lmodel_grad: List[Tensor],
lr_sum: float, lr: float, weight_decay: float, maximize: bool,
momentum: float, dampening: float, nesterov: bool
):
for i, param in enumerate(params):
d_p = d_p_list[i]
if weight_decay != 0:
d_p = d_p.add(param, alpha=weight_decay)
if momentum != 0:
buf = momentum_buffer_list[i]
if buf is None:
buf = torch.clone(d_p).detach()
momentum_buffer_list[i] = buf
else:
buf.mul_(momentum).add_(d_p, alpha=1 - dampening)
if nesterov:
d_p = d_p.add(buf, alpha=momentum)
else:
d_p = buf
alpha = lr if maximize else -lr
beta = lr_sum if maximize else -lr_sum
if len(gmodel_grad) > 0:
param.add_(d_p - (lmodel_grad[i] -
gmodel_grad[i]) / beta, alpha=alpha)
else:
param.add_(d_p, alpha=alpha)
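# Hedged smoke test (ours): with empty control-variate lists, sgdfold reduces
# to plain SGD, so this only exercises the optimizer plumbing; all names below
# are illustrative.
if __name__ == "__main__":
    model = torch.nn.Linear(4, 1)
    opt = SCAFFOLDOptimizer(model.parameters(), gmodel_grad=[], lmodel_grad=[],
                            iter_num=10, lr=0.01)
    x, y = torch.randn(8, 4), torch.randn(8, 1)
    loss = torch.nn.functional.mse_loss(model(x), y)
    loss.backward()
    opt.step()
    print("step ok, loss:", loss.item())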
| 8,527 | 39.803828 | 128 | py |
XFL | XFL-master/python/algorithm/core/horizontal/template/torch/scaffold/assist_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from algorithm.core.horizontal.aggregation.aggregation_base import AggregationRootBase
from ..base import BaseTrainer
from service.fed_control import ProgressCalculator
class SCAFFOLDAssistTrainer:
def __init__(self, trainer: BaseTrainer):
self.trainer = trainer
self.progress_calculator = ProgressCalculator(trainer.context["global_epoch_num"])
def register(self):
self.trainer.register_hook(
place="before_global_epoch", rank=-1,
func=self.trainer._load_model, desc="load pretrain model"
)
self.trainer.register_hook(
place="after_local_epoch", rank=-2,
func=self._aggregate_model, desc="aggregate local models"
)
self.trainer.register_hook(
place="before_local_epoch", rank=-2,
            func=self._sync_early_stop_flag, desc="sync early stop flag"
)
self.trainer.register_hook(
place="before_local_epoch", rank=-1,
func=self._broadcast_model, desc="broadcast global model"
)
self.trainer.register_hook(
place="after_local_epoch", rank=-1,
func=self.progress_calculator.cal_horizontal_progress,
desc="update progress bar"
)
self.trainer.register_hook(
place="after_global_epoch", rank=-1,
func=self.progress_calculator.finish_progress, desc="update progress bar"
)
def _sync_early_stop_flag(self, context: dict):
aggregator: AggregationRootBase = self.trainer.aggregator
aggregator.broadcast(context["early_stop_flag"])
return context["early_stop_flag"]
def _broadcast_model(self, context: dict):
aggregator: AggregationRootBase = self.trainer.aggregator
aggregator.broadcast(self.trainer.model.state_dict())
def _aggregate_model(self, context: dict):
aggregator: AggregationRootBase = self.trainer.aggregator
new_state_dict = aggregator.aggregate()
if self.trainer.device != "cpu":
self.trainer._state_dict_to_device(
new_state_dict, self.trainer.device, inline=True)
self.trainer.model.load_state_dict(new_state_dict)
| 2,818 | 39.855072 | 90 | py |
XFL | XFL-master/python/algorithm/core/horizontal/template/jax/base.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from functools import partial
from typing import OrderedDict
import flax.linen as nn
import optax
from flax.training import train_state, checkpoints
from typing import Any
from algorithm.core.horizontal.aggregation.api import get_aggregation_root_inst
from algorithm.core.horizontal.aggregation.api import get_aggregation_leaf_inst
from algorithm.core.loss.jax_loss import get_lossfunc
from algorithm.core.lr_scheduler.jax_lr_scheduler import get_lr_scheduler
from algorithm.core.optimizer.jax_optimizer import get_optimizer
from algorithm.core.metrics import get_metric
from common.utils.config_parser import TrainConfigParser
from common.utils.logger import logger
from algorithm.core.horizontal.template.hooker import Hooker
class BaseTrainer(Hooker, TrainConfigParser):
def __init__(self, train_conf: dict):
Hooker.__init__(self)
TrainConfigParser.__init__(self, train_conf)
self.declare_hooks(["before_global_epoch", "before_local_epoch", "before_train_loop",
"after_train_loop", "after_local_epoch", "after_global_epoch"])
self.train_dataloader, self.exmp_label = self._set_train_dataloader()
self.val_dataloader, self.exmp_assist = self._set_val_dataloader()
self.model = self._set_model()
self.loss_func = self._set_lossfunc()
self.lr_scheduler = self._set_lr_scheduler()
self.state = self._set_optimizer()
self.metrics = self._set_metrics()
self.aggregator = self._set_aggregator(self.identity)
def _set_aggregator(self, party_type: str):
aggregation_config = self.train_params.get("aggregation_config", {})
encryption_params = aggregation_config.get("encryption")
#logger.info(encryption_params)
if party_type == "assist_trainer":
aggregator = get_aggregation_root_inst(encryption_params)
else:
aggregator = get_aggregation_leaf_inst(encryption_params)
return aggregator
def _set_model(self) -> nn.Module:
raise NotImplementedError("The _set_model method is not implemented.")
def _set_train_dataloader(self):
raise NotImplementedError(
"The _set_train_dataloader method is not implemented.")
def _set_val_dataloader(self):
raise NotImplementedError(
"The _set_val_dataloader method is not implemented.")
def _save_model(self, context: dict):
path = self.output["model"]["path"]
name = self.output["model"]["name"]
type = self.output["model"]["type"]
if not os.path.exists(path):
os.makedirs(path)
if type == "file":
checkpoints.save_checkpoint(
ckpt_dir=path,
target={'params': self.state.params, 'batch_stats': self.state.batch_stats},
step=0,
overwrite=True,
)
else:
raise NotImplementedError(f"Type {type} not supported.")
def _set_lossfunc(self):
""" Define self.loss_func """
loss_func = None
loss_func_conf = OrderedDict(self.train_params.get("lossfunc_config", {}))
for k in loss_func_conf.keys():
self.loss_func_name = k
loss_func = get_lossfunc(k)
return loss_func
def _set_lr_scheduler(self):
""" Define self.lr_scheduler """
lr_scheduler = None
lr_scheduler_conf = OrderedDict(self.train_params.get("lr_scheduler_config", {}))
for k, v in lr_scheduler_conf.items():
lr_scheduler = get_lr_scheduler(k)(**v)
return lr_scheduler
def _set_optimizer(self):
""" Define self.optimizer """
optimizer_conf = OrderedDict(self.train_params.get("optimizer_config", {}))
optimizer = None
for k, v in optimizer_conf.items():
opt_class = get_optimizer(k)
if self.lr_scheduler:
optimizer = optax.chain(optax.clip(1.0), opt_class(self.lr_scheduler, **v))
else:
optimizer = optax.chain(optax.clip(1.0), opt_class(**v))
state = None
if optimizer:
state = TrainState.create(
apply_fn=self.model.apply,
params=self.init_params,
batch_stats=self.init_batch_stats,
tx=optimizer
)
return state
def _set_metrics(self):
""" Define metric """
metrics = {}
metrics_conf: dict = self.train_params.get("metric_config", {})
for k, v in metrics_conf.items():
metric = get_metric(k)
metrics[k] = partial(metric, **v)
return metrics
def train_loop(self):
raise NotImplementedError("The train_loop method is not implemented.")
def fit(self):
current_epoch = 1
self.context["current_epoch"] = current_epoch
self.context["train_conf"] = self.train_conf
global_epoch_num = self.train_params.get("global_epoch", 0)
local_epoch_num = self.train_params.get("local_epoch", 0)
self.execute_hook_at("before_global_epoch")
for g_epoch in range(1, global_epoch_num + 1):
logger.info(f"global epoch {g_epoch}/{global_epoch_num} start...")
self.context['g_epoch'] = g_epoch
if self.execute_hook_at("before_local_epoch"):
break
for l_epoch in range(1, local_epoch_num + 1):
logger.info(
f"local epoch {l_epoch}/{local_epoch_num} of global epoch {g_epoch} start...")
self.context['l_epoch'] = l_epoch
self.execute_hook_at("before_train_loop")
self.train_loop()
self.execute_hook_at("after_train_loop")
current_epoch += 1
self.context["current_epoch"] = current_epoch
logger.info(
f"local epoch {l_epoch}/{local_epoch_num} of global epoch {g_epoch} finished.")
if self.execute_hook_at("after_local_epoch"):
break
logger.info(f"global epoch {g_epoch}/{global_epoch_num} finished.")
self.execute_hook_at("after_global_epoch")
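    # Hedged summary (ours) of the hook lifecycle that fit() drives:
    #   before_global_epoch
    #   for each global epoch:
    #       before_local_epoch (a truthy hook result stops training early)
    #       for each local epoch:
    #           before_train_loop -> train_loop -> after_train_loop
    #       after_local_epoch (may also stop training early)
    #   after_global_epoch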
class TrainState(train_state.TrainState):
# A simple extension of TrainState to also include batch statistics
batch_stats: Any | 6,958 | 36.820652 | 99 | py |
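# Hedged sketch (ours): constructing the extended TrainState above for a toy
# flax module; the rng seed, input shape and sgd learning rate are placeholders.
import jax
import jax.numpy as jnp
import optax
import flax.linen as nn
model = nn.Dense(features=1)
params = model.init(jax.random.PRNGKey(0), jnp.ones((1, 4)))["params"]
state = TrainState.create(apply_fn=model.apply, params=params,
                          batch_stats={}, tx=optax.sgd(1e-2))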
XFL | XFL-master/python/algorithm/core/horizontal/template/jax/fedtype.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from service.fed_config import FedConfig
def _get_assist_trainer():
aggregation_config = FedConfig.stage_config["train_info"]["params"]["aggregation_config"]
type = aggregation_config.get("type")
from algorithm.core.horizontal.template.jax.fedavg.assist_trainer import FedAvgAssistTrainer
return FedAvgAssistTrainer
def _get_label_trainer():
aggregation_config = FedConfig.stage_config["train_info"]["params"]["aggregation_config"]
type = aggregation_config.get("type")
from algorithm.core.horizontal.template.jax.fedavg.label_trainer import FedAvgLabelTrainer
return FedAvgLabelTrainer | 1,227 | 41.344828 | 96 | py |
XFL | XFL-master/python/algorithm/core/horizontal/template/jax/fedavg/label_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from algorithm.core.horizontal.aggregation.aggregation_base import AggregationLeafBase
from ..base import BaseTrainer
class FedAvgLabelTrainer(BaseTrainer):
def __init__(self, train_conf: dict):
super().__init__(train_conf)
self.register_hook(place="before_local_epoch", rank=1,
func=self._download_model, desc="download global model")
self.register_hook(place="after_local_epoch", rank=1,
func=self._upload_model, desc="upload local model")
def _download_model(self, context: dict):
aggregator: AggregationLeafBase = self.aggregator
self.state_dict = aggregator.download()
self.state_dict_to_state()
def _upload_model(self, context: dict):
aggregator: AggregationLeafBase = self.aggregator
aggregation_config = self.train_params["aggregation_config"]
weight = aggregation_config.get("weight") or len(self.train_dataloader.dataset)
self.state_to_state_dict()
aggregator.upload(self.state_dict, weight)
| 1,691 | 41.3 | 87 | py |
XFL | XFL-master/python/algorithm/core/horizontal/template/jax/fedavg/assist_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from algorithm.core.horizontal.aggregation.aggregation_base import AggregationRootBase
from ..base import BaseTrainer
class FedAvgAssistTrainer(BaseTrainer):
def __init__(self, train_conf: dict):
super().__init__(train_conf)
self.current_epoch = 0
self.register_hook(place="before_local_epoch", rank=1,
func=self._broadcast_model, desc="broadcast global model")
self.register_hook(place="after_local_epoch", rank=1,
func=self._aggregate_model, desc="aggregate local models")
def _broadcast_model(self, context: dict):
aggregator: AggregationRootBase = self.aggregator
aggregator.broadcast(self.state_dict)
def _aggregate_model(self, context: dict):
aggregator: AggregationRootBase = self.aggregator
self.state_dict = aggregator.aggregate()
self.state_dict_to_state()
self.current_epoch += 1
| 1,572 | 39.333333 | 86 | py |
XFL | XFL-master/python/algorithm/core/horizontal/template/paddle/base.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from functools import partial
from typing import OrderedDict
import paddle
from algorithm.core.horizontal.aggregation.api import get_aggregation_root_inst
from algorithm.core.horizontal.aggregation.api import get_aggregation_leaf_inst
from algorithm.core.loss.paddle_loss import get_lossfunc
from algorithm.core.metrics import get_metric
from algorithm.core.optimizer.paddle_optimizer import get_optimizer
from algorithm.core.lr_scheduler.paddle_lr_scheduler import get_lr_scheduler
from common.utils.config_parser import TrainConfigParser
from common.utils.logger import logger
from ..hooker import Hooker
class BaseTrainer(Hooker, TrainConfigParser):
def __init__(self, train_conf: dict):
Hooker.__init__(self)
TrainConfigParser.__init__(self, train_conf)
self.declare_hooks(["before_global_epoch", "before_local_epoch", "before_train_loop",
"after_train_loop", "after_local_epoch", "after_global_epoch"])
self.model = self._set_model()
self.train_dataloader = self._set_train_dataloader()
self.val_dataloader = self._set_val_dataloader()
self.loss_func = self._set_lossfunc()
self.lr_scheduler = self._set_lr_scheduler()
self.optimizer = self._set_optimizer()
self.metrics = self._set_metrics()
self.aggregator = self._set_aggregator(self.identity)
def _set_aggregator(self, party_type: str):
aggregation_config = self.train_params.get("aggregation_config", {})
encryption_params = aggregation_config.get("encryption")
#logger.info(encryption_params)
if party_type == "assist_trainer":
aggregator = get_aggregation_root_inst(encryption_params)
else:
aggregator = get_aggregation_leaf_inst(encryption_params)
return aggregator
def _set_model(self):
raise NotImplementedError("The _set_model method is not implemented.")
def _set_train_dataloader(self):
raise NotImplementedError(
"The _set_train_dataloader method is not implemented.")
def _set_val_dataloader(self):
raise NotImplementedError(
"The _set_val_dataloader method is not implemented.")
def _save_model(self, context: dict):
path = self.output["model"]["path"]
name = self.output["model"]["name"]
type = self.output["model"]["type"]
if not os.path.exists(path):
os.makedirs(path)
path = os.path.join(path, name)
if type == "file":
            paddle.save(self.model.state_dict(), path)
else:
raise NotImplementedError(f"Type {type} not supported.")
def _load_model(self, context: dict):
pretrain_model_conf = self.input["pretrain_model"]
if pretrain_model_conf != {}:
path = os.path.join(
pretrain_model_conf["path"], pretrain_model_conf["name"])
            self.model.set_state_dict(paddle.load(path))
def _set_optimizer(self):
""" Define self.optimizer """
optimizer_conf = OrderedDict(
self.train_params.get("optimizer_config", {}))
optimizer = OrderedDict()
if self.lr_scheduler:
for k, v in optimizer_conf.items():
                optimizer[k] = get_optimizer(k)(
                    learning_rate=list(self.lr_scheduler.values())[0],
                    parameters=self.model.parameters(), **v)
else:
for k, v in optimizer_conf.items():
optimizer[k] = get_optimizer(k)(parameters=self.model.parameters(), **v)
return optimizer
def _set_lossfunc(self):
""" Define self.loss_func """
loss_func_conf = OrderedDict(
self.train_params.get("lossfunc_config", {}))
loss_func = OrderedDict()
for k, v in loss_func_conf.items():
loss_func[k] = get_lossfunc(k)(**v)
return loss_func
def _set_lr_scheduler(self):
lr_scheduler_conf = OrderedDict(
self.train_params.get("lr_scheduler_config", {}))
lr_scheduler = OrderedDict()
for (k, v) in lr_scheduler_conf.items():
lr_scheduler[k] = get_lr_scheduler(k)(**v)
return lr_scheduler
def _set_metrics(self):
""" Define metric """
metrics = {}
metrics_conf: dict = self.train_params.get("metric_config", {})
for k, v in metrics_conf.items():
metric = get_metric(k)
metrics[k] = partial(metric, **v)
return metrics
def train_loop(self):
raise NotImplementedError("The train_loop method is not implemented.")
def fit(self):
current_epoch = 1
self.context["current_epoch"] = current_epoch
self.context["train_conf"] = self.train_conf
global_epoch_num = self.train_params.get("global_epoch", 0)
local_epoch_num = self.train_params.get("local_epoch", 0)
self.execute_hook_at("before_global_epoch")
for g_epoch in range(1, global_epoch_num + 1):
logger.info(f"global epoch {g_epoch}/{global_epoch_num} start...")
self.context['g_epoch'] = g_epoch
if self.execute_hook_at("before_local_epoch"):
break
for l_epoch in range(1, local_epoch_num + 1):
logger.info(
f"local epoch {l_epoch}/{local_epoch_num} of global epoch {g_epoch} start...")
self.context['l_epoch'] = l_epoch
self.execute_hook_at("before_train_loop")
self.train_loop()
self.execute_hook_at("after_train_loop")
current_epoch += 1
self.context["current_epoch"] = current_epoch
logger.info(
f"local epoch {l_epoch}/{local_epoch_num} of global epoch {g_epoch} finished.")
if self.execute_hook_at("after_local_epoch"):
break
logger.info(f"global epoch {g_epoch}/{global_epoch_num} finished.")
self.execute_hook_at("after_global_epoch")
| 6,635 | 37.137931 | 139 | py |
XFL | XFL-master/python/algorithm/core/horizontal/template/paddle/fedavg/label_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from algorithm.core.horizontal.aggregation.aggregation_base import AggregationLeafBase
from ..base import BaseTrainer
from collections import OrderedDict
class FedAvgLabelTrainer(BaseTrainer):
def __init__(self, train_conf: dict):
super().__init__(train_conf)
self.register_hook(place="before_local_epoch", rank=1,
func=self._download_model, desc="download global model")
self.register_hook(place="after_local_epoch", rank=1,
func=self._upload_model, desc="upload local model")
def _download_model(self, context: dict):
aggregator: AggregationLeafBase = self.aggregator
new_state_dict = aggregator.download()
self.model.set_state_dict(new_state_dict)
def _upload_model(self, context: dict):
aggregator: AggregationLeafBase = self.aggregator
state_dict = self._rebuild_state_dict(self.model.state_dict())
aggregation_config = self.train_params["aggregation_config"]
weight = aggregation_config.get("weight") or len(self.train_dataloader.dataset)
aggregator.upload(state_dict, weight)
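    # Hedged note (ours): converting paddle tensors to numpy before upload keeps
    # the aggregation layer framework-agnostic; _download_model then feeds the
    # aggregated arrays back through set_state_dict.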
def _rebuild_state_dict(self, state_dict):
new_state_dict = OrderedDict()
for k, v in state_dict.items():
new_state_dict[k] = v.numpy()
return new_state_dict | 2,000 | 42.5 | 87 | py |
XFL | XFL-master/python/algorithm/core/horizontal/template/paddle/fedavg/assist_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from algorithm.core.horizontal.aggregation.aggregation_base import AggregationRootBase
from ..base import BaseTrainer
from collections import OrderedDict
class FedAvgAssistTrainer(BaseTrainer):
def __init__(self, train_conf: dict):
super().__init__(train_conf)
self.register_hook(place="before_local_epoch", rank=1,
func=self._broadcast_model, desc="broadcast global model")
self.register_hook(place="after_local_epoch", rank=1,
func=self._aggregate_model, desc="aggregate local models")
def _broadcast_model(self, context: dict):
aggregator: AggregationRootBase = self.aggregator
aggregator.broadcast(self._rebuild_state_dict(self.model.state_dict()))
def _aggregate_model(self, context: dict):
aggregator: AggregationRootBase = self.aggregator
new_state_dict = aggregator.aggregate()
self.model.set_state_dict(new_state_dict)
def _rebuild_state_dict(self, state_dict):
new_state_dict = OrderedDict()
for k, v in state_dict.items():
new_state_dict[k] = v.numpy()
return new_state_dict | 1,791 | 40.674419 | 86 | py |
XFL | XFL-master/python/algorithm/core/horizontal/template/tensorflow/fedavg/base.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import os
from functools import partial
from typing import OrderedDict
from algorithm.core.horizontal.aggregation.api import get_aggregation_root_inst
from algorithm.core.horizontal.aggregation.api import get_aggregation_leaf_inst
from algorithm.core.loss.tf_loss import get_lossfunc
from algorithm.core.metrics import get_metric
from algorithm.core.optimizer.tf_optimizer import get_optimizer
from common.utils.config_parser import TrainConfigParser
from common.utils.logger import logger
from algorithm.core.horizontal.template.hooker import Hooker
import tensorflow.keras as keras
class BaseTrainer(Hooker, TrainConfigParser):
def __init__(self, train_conf: dict):
Hooker.__init__(self)
TrainConfigParser.__init__(self, train_conf)
self.declare_hooks(["before_global_epoch", "before_local_epoch", "before_train_loop",
"after_train_loop", "after_local_epoch", "after_global_epoch"])
self.model = self._set_model()
self.train_dataloader = self._set_train_dataloader()
self.val_dataloader = self._set_val_dataloader()
self.loss_func = self._set_lossfunc()
self.optimizer = self._set_optimizer()
self.metrics = self._set_metrics()
self.aggregator = self._set_aggregator(self.identity)
def _set_aggregator(self, party_type: str):
aggregation_config = self.train_params.get("aggregation_config", {})
encryption_params = aggregation_config.get("encryption")
# logger.info(encryption_params)
if party_type == "assist_trainer":
aggregator = get_aggregation_root_inst(encryption_params)
else:
aggregator = get_aggregation_leaf_inst(encryption_params)
return aggregator
def _set_model(self) -> keras.Model:
raise NotImplementedError("The _set_model method is not implemented.")
def _set_train_dataloader(self):
raise NotImplementedError(
"The _set_train_dataloader method is not implemented.")
def _set_val_dataloader(self):
raise NotImplementedError(
"The _set_val_dataloader method is not implemented.")
def _save_model(self, context: dict):
path = self.output["model"]["path"]
name = self.output["model"]["name"]
type = self.output["model"]["type"]
if not os.path.exists(path):
os.makedirs(path)
path = os.path.join(path, name)
if type == "file":
self.model.save_weights(path)
else:
raise NotImplementedError(f"Type {type} not supported.")
def _load_model(self, context: dict):
pretrain_model_conf = self.input["pretrain_model"]
if pretrain_model_conf != {}:
path = os.path.join(
pretrain_model_conf["path"], pretrain_model_conf["name"])
self.model.load_weights(path)
def _set_optimizer(self):
""" Define self.optimizer """
optimizer_conf = OrderedDict(
self.train_params.get("optimizer_config", {}))
optimizer = OrderedDict()
for k, v in optimizer_conf.items():
optimizer[k] = get_optimizer(k)(**v)
return optimizer
def _set_lossfunc(self):
""" Define self.loss_func """
loss_func_conf = OrderedDict(
self.train_params.get("lossfunc_config", {}))
loss_func = OrderedDict()
for k, v in loss_func_conf.items():
loss_func[k] = get_lossfunc(k)(**v)
return loss_func
def _set_metrics(self):
""" Define metric """
metrics = {}
metrics_conf: dict = self.train_params.get("metric_config", {})
for k, v in metrics_conf.items():
metric = get_metric(k)
metrics[k] = partial(metric, **v)
return metrics
def train_loop(self):
raise NotImplementedError("The train_loop method is not implemented.")
def fit(self):
current_epoch = 1
self.context["current_epoch"] = current_epoch
self.context["train_conf"] = self.train_conf
global_epoch_num = self.train_params.get("global_epoch", 0)
local_epoch_num = self.train_params.get("local_epoch", 0)
self.execute_hook_at("before_global_epoch")
for g_epoch in range(1, global_epoch_num + 1):
logger.info(f"global epoch {g_epoch}/{global_epoch_num} start...")
self.context['g_epoch'] = g_epoch
if self.execute_hook_at("before_local_epoch"):
break
for l_epoch in range(1, local_epoch_num + 1):
logger.info(
f"local epoch {l_epoch}/{local_epoch_num} of global epoch {g_epoch} start...")
self.context['l_epoch'] = l_epoch
self.execute_hook_at("before_train_loop")
self.train_loop()
self.execute_hook_at("after_train_loop")
current_epoch += 1
self.context["current_epoch"] = current_epoch
logger.info(
f"local epoch {l_epoch}/{local_epoch_num} of global epoch {g_epoch} finished.")
if self.execute_hook_at("after_local_epoch"):
break
logger.info(f"global epoch {g_epoch}/{global_epoch_num} finished.")
self.execute_hook_at("after_global_epoch")
| 5,968 | 36.074534 | 99 | py |
XFL | XFL-master/python/algorithm/core/horizontal/template/tensorflow/fedavg/label_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from algorithm.core.horizontal.aggregation.aggregation_base import AggregationLeafBase
from .base import BaseTrainer
from collections import OrderedDict
class FedAvgLabelTrainer(BaseTrainer):
def __init__(self, train_conf: dict):
super().__init__(train_conf)
self.register_hook(place="before_local_epoch", rank=1,
func=self._download_model, desc="download global model")
self.register_hook(place="after_local_epoch", rank=1,
func=self._upload_model, desc="upload local model")
def _download_model(self, context: dict):
aggregator: AggregationLeafBase = self.aggregator
model_weight_dict = aggregator.download()
self.model.set_weights(list(model_weight_dict.values()))
def _upload_model(self, context: dict):
aggregator: AggregationLeafBase = self.aggregator
model_weight = self.model.get_weights()
        model_weight_dict = OrderedDict({i: w for i, w in enumerate(model_weight)})
aggregation_config = self.train_params["aggregation_config"]
weight = aggregation_config.get("weight") or len(self.train_dataloader._input_dataset)
aggregator.upload(model_weight_dict, weight)
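    # Hedged note (ours): keras get_weights()/set_weights() work on
    # order-sensitive lists, so the OrderedDict above keys each array by its
    # position; the assist trainer must rebuild the list in the same enumeration
    # order before calling set_weights().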
| 1,915 | 44.619048 | 94 | py |
XFL | XFL-master/python/algorithm/core/horizontal/template/tensorflow/fedavg/assist_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from algorithm.core.horizontal.aggregation.aggregation_base import AggregationRootBase
from .base import BaseTrainer
from collections import OrderedDict
class FedAvgAssistTrainer(BaseTrainer):
def __init__(self, train_conf: dict):
super().__init__(train_conf)
self.current_epoch = 0
self.register_hook(place="before_local_epoch", rank=1,
func=self._broadcast_model, desc="broadcast global model")
self.register_hook(place="after_local_epoch", rank=1,
func=self._aggregate_model, desc="aggregate local models")
def _broadcast_model(self, context: dict):
aggregator: AggregationRootBase = self.aggregator
model_weight = self.model.get_weights()
        model_weight_dict = OrderedDict({i: w for i, w in enumerate(model_weight)})
aggregator.broadcast(model_weight_dict)
def _aggregate_model(self, context: dict):
aggregator: AggregationRootBase = self.aggregator
model_weight_dict = aggregator.aggregate()
self.model.set_weights(list(model_weight_dict.values()))
self.current_epoch += 1
| 1,761 | 43.05 | 86 | py |
XFL | XFL-master/python/algorithm/core/tree_ray/cat_param_parser.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from algorithm.core.tree_ray.xgb_head import XgbRayHeadMaster
def parse_category_param(ray_head_master: XgbRayHeadMaster,
col_index: str = "",
col_names: list[str] = [],
max_num_value: int = 0,
col_index_type: str = 'inclusive',
col_names_type: str = 'inclusive',
max_num_value_type: str = 'union') -> list[int]:
""" Calculate column indexes and column names of category. The formulation is:
features that column indexes are in col_index if col_index_type is 'inclusive' or not in col_index if col_index_type is 'exclusive'.
union
features that column names are in col_names if col_names_type is 'inclusive' or not in col_names if col_names_type is 'exclusive'.
union if max_num_value_type is 'union' or intersect if max_num_value_type is 'intersection'
features that number of different values is less equal than max_num_value.
Args:
ray_head_master (XgbRayHeadMaster): a handler for controlling ray cluster.
col_index (str): column index of features which are supposed to be (or not to be) a categorial feature. Defaults to "".
col_names (list[str]): column names of features which are supposed to be (or not to be) a categorical feature. Defaults to [].
max_num_value (int): if n <= max_num_value where n is the number of different values in a feature column, then the feature is supposed to be a category feature. Defalts to 0.
col_index_type (str, optional): support 'inclusive' and 'exclusive'. Defaults to 'inclusive'.
col_names_type (str, optional): support 'inclusive' and 'exclusive'. Defaults to 'inclusive'.
max_num_value_type (str, optional): support 'intersection' and 'union'. Defaults to 'union'.
Returns:
list[int]: list of categorial feature column indexes.
Note:
col_index is count from the first column of features, not the input table.
col_index support single value and slice. For example, a vaild form of col_index is "2, 4:8, -7, -10:-7", where "4:8" means "4,5,6,7",
vaild form of col_names is like ["wage", "age"].
"""
res = []
feature_names = ray_head_master.dataset_info.feature_names
if col_index != "":
index1 = _parse_index(col_index, len(feature_names))
if col_index_type == 'inclusive':
res += index1
elif col_index_type == 'exclusive':
res += list(set(range(len(feature_names))) - set(index1))
else:
raise ValueError(
f"col_index_type {col_index_type} not valid, need to be one of the 'inclusive' and 'exclusive'.")
if col_names != []:
index2 = _parse_names(col_names, feature_names)
if col_names_type == 'inclusive':
res += index2
elif col_names_type == 'exclusive':
res += list(set(range(len(feature_names))) - set(index2))
else:
raise ValueError(
f"col_names_type {col_names_type} not valid, need to be one of the 'inclusive' and 'exclusive'.")
res = list(set(res))
if max_num_value > 0:
if max_num_value_type == "union":
num_unique = ray_head_master.nunique(cols=None).to_numpy()
# num_unique = df.nunique().to_numpy()
index3 = list(np.where(num_unique <= max_num_value)[0])
res += index3
elif max_num_value_type == "intersection":
col_selection = [False for i in range(len(feature_names))]
for i in res:
col_selection[i] = True
num_unique = ray_head_master.nunique(cols=col_selection).to_numpy()
# df_category = df.iloc[:, col_selection]
# num_unique = df_category.nunique().to_numpy()
index3 = list(np.where(num_unique <= max_num_value)[0])
res = list(map(lambda x: res[x], index3))
else:
raise ValueError(
f"max_num_value_type {max_num_value_type} not valid, need to be one of the 'union' and 'intersect'.")
res = list(set(res))
return res
def _parse_index(index: str, num_cols: int) -> list[int]:
    '''index form is like "1, 3:5, 4, 8:11"'''
res = []
index_list = index.replace(' ', '').split(',')
for value in index_list:
if ':' in value:
left, right = value.split(':')
if left == "":
left = 0
if int(left) < 0:
left = min(max(0, num_cols + int(left)), num_cols)
if right == "":
right = num_cols
if int(right) < 0:
right = min(max(0, num_cols + int(right)), num_cols)
res += [i for i in range(int(left), int(right))]
else:
value = int(value)
if abs(value) >= num_cols:
raise ValueError(f"Column index {value} is greater equal than the column size {num_cols}")
if value < 0:
value += num_cols
res.append(value)
res = list(set(res))
return res
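# Hedged worked examples (ours) for _parse_index with num_cols=12; note that the
# final list(set(...)) does not guarantee ordering:
#   _parse_index("2, 4:8", 12)  -> {2, 4, 5, 6, 7}
#   _parse_index("-3:", 12)     -> {9, 10, 11}
#   _parse_index(":-10", 12)    -> {0, 1}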
def _parse_names(names: list[str], valid_names: list[str]) -> list[int]:
res = []
name_list = [item.strip() for item in names]
for name in name_list:
try:
i = valid_names.index(name)
res.append(i)
except ValueError as e:
raise ValueError(f"Column name {name} not found: {e}")
res = list(set(res))
return res
| 6,228 | 39.448052 | 182 | py |
XFL | XFL-master/python/algorithm/core/tree_ray/xgb_head.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from pathlib import Path
from typing import List, Union, Optional, Dict
import pandas as pd
import numpy as np
import ray
from algorithm.core.tree_ray.assign_ray_input import gen_actor_indices
from algorithm.core.tree_ray.dataloader_head import XgbDataLoaderHead
from algorithm.core.tree_ray.dataframe_head import XgbDataFrameHead
from algorithm.core.tree_ray.tree_train_head import TreeTrainHead
from algorithm.core.tree_ray.dataset_info import RayDatasetInfo
from algorithm.core.tree_ray.xgb_actor import XgbActor
from algorithm.core.tree.tree_structure import BoostingTree, Tree, Node
from common.crypto.paillier.paillier import PaillierContext
from common.utils.logger import logger
class XgbRayHeadMaster:
def __init__(self, ray_tasks_num_returns: Optional[int] = None):
current_path = Path(os.path.dirname(__file__))
package_path = current_path.parent.parent.parent
        ray.init(runtime_env={"working_dir": str(package_path), "excludes": ["core.*"]})
num_cores = int(ray.cluster_resources()['CPU'])
logger.info(f"Ray cluster core number: {num_cores}")
self.ray_tasks_num_returns = ray_tasks_num_returns
self.ray_actors = [
XgbActor.options(num_cpus=1).remote() for i in range(num_cores)
]
self.dataset_info: RayDatasetInfo = None
self.val_dataset_info: RayDatasetInfo = None
self.test_dataset_info: RayDatasetInfo = None
self.ray_dataframe_head = None
self.tree_train_head = TreeTrainHead(self.ray_actors, ray_tasks_num_returns)
def scatter_data(self,
path_list: list[str],
dataset_type: str,
has_id: bool = True,
has_label: bool = True,
missing_values: Union[float, List[float]] = [],
atomic_row_size_per_cpu_core: int = 5000,
is_centralized: bool = True,
file_type: str = 'csv'):
dataset_info = XgbDataLoaderHead.scatter_data(path_list,
dataset_type,
self.ray_actors,
has_id,
has_label,
missing_values,
atomic_row_size_per_cpu_core,
is_centralized,
file_type)
if dataset_type == "train":
self.dataset_info = dataset_info
elif dataset_type == "val":
self.val_dataset_info = dataset_info
else:
self.test_dataset_info = dataset_info
return
def nunique(self, cols: Optional[List[Union[bool, int]]] = None):
if self.ray_dataframe_head is None:
self.ray_dataframe_head = XgbDataFrameHead(self.dataset_info, self.ray_tasks_num_returns)
res = self.ray_dataframe_head.nunique(cols)
return res
def set_cat_features(self, names: List[str]):
if self.ray_dataframe_head is None:
self.ray_dataframe_head = XgbDataFrameHead(self.dataset_info, self.ray_tasks_num_returns)
self.ray_dataframe_head.set_cat_features(names)
self.dataset_info.cat_names = names
return
def xgb_binning(self, num_bins: int):
if self.ray_dataframe_head is None:
self.ray_dataframe_head = XgbDataFrameHead(self.dataset_info, self.ray_tasks_num_returns)
split_points = self.ray_dataframe_head.xgb_binning(num_bins)
self.ray_dataframe_head.set_split_points(split_points)
return split_points
def sync_all_trees(self, boosting_tree: BoostingTree):
self.tree_train_head.sync_all_trees(boosting_tree)
return
def sync_latest_tree(self, tree: Tree, lr: float, max_depth: int):
self.tree_train_head.sync_latest_tree(tree, lr, max_depth)
return
def sync_config(self,
paillier_context: PaillierContext,
cat_smooth: float,
lambda_: float):
self.tree_train_head.sync_config(paillier_context,
cat_smooth,
lambda_)
return
def new_big_feature(self,
indices: Optional[np.ndarray],
columns: Optional[List[str]],
grad: Optional[np.ndarray],
hess: Optional[np.ndarray],
grad_hess: Optional[np.ndarray]):
self.dataset_info.big_feature_names = columns
indices_dict, grad_dict, hess_dict, grad_hess_dict = \
self.tree_train_head.new_big_feature(indices,
columns,
grad,
hess,
grad_hess,
self.dataset_info)
return indices_dict, grad_dict, hess_dict, grad_hess_dict
def gen_big_feature_updater(self,
columns: Optional[List[str]]):
self.dataset_info.big_feature_names = columns
big_feature_updater = self.tree_train_head.gen_big_feature_updater(self.dataset_info.block_to_actor_map,
columns)
return big_feature_updater
def gen_node_hist_iterator(self,
node_id: str,
packed: bool,
calc_count: bool,
indices: Optional[np.ndarray],
col_step: Optional[int] = None):
""" Calc hist for a node with samples selected by indices
Args:
indices (Optional[np.ndarray]): indices of samples, if None, all the samples in big_feature are used.
col_step (Optional[int], optional): num of features to calc in one step, if None, all features will be used
in one step. Defaults to None.
"""
num_features = len(self.dataset_info.big_feature_names)
if col_step is not None and col_step >= num_features:
col_step = None
indices = gen_actor_indices(indices, self.dataset_info)
hist_iterator = self.tree_train_head.gen_node_hist_iterator(node_id, packed, calc_count, indices, num_features, col_step)
return hist_iterator
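    # Hedged usage sketch (ours): consume the iterator lazily so only col_step
    # features' histograms are materialized per step, e.g.
    #   for hist_batch in head.gen_node_hist_iterator("node_0", packed=False,
    #                                                 calc_count=True,
    #                                                 indices=None, col_step=32):
    #       ...  # merge hist_batch into the split search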
def encrypt_grad_hess(self,
packed: bool,
context: PaillierContext,
precision: Optional[float]):
iterator = self.tree_train_head.encrypt_grad_hess(packed=packed,
block_to_actor_map=self.dataset_info.block_to_actor_map,
context=context,
precision=precision,
lazy_return=True)
return iterator
def filter_sample_index(self,
node_id: str,
feature_name: str,
condition: Union[int, List[int]]):
sample_index = self.tree_train_head.filter_sample_index(node_id, feature_name, condition)
return sample_index
def free_node_big_feature(self, node_id: str):
self.tree_train_head.free_node_big_feature(node_id)
return
def calc_split_info(self,
is_remote: bool,
hist_dict: Dict[str, pd.DataFrame],
cat_names: List[str]):
hint_split_info_iterator = self.tree_train_head.calc_split_info(is_remote,
hist_dict,
cat_names,
lazy_return=True)
return hint_split_info_iterator
def make_indicator_for_prediction_on_tree(self, tree: Tree, local_party_id: str, dataset_type: str):
indicator = self.tree_train_head.make_indicator_for_prediction_on_tree(tree, local_party_id, dataset_type)
return indicator
def make_indicator_for_prediction_on_boosting_tree(self, boosting_tree: BoostingTree, local_party_id: str, dataset_type: str):
indicator = self.tree_train_head.make_indicator_for_prediction_on_boosting_tree(boosting_tree, local_party_id, dataset_type)
return indicator
def make_indicator_for_prediction_on_nodes(self, nodes: Dict[str, Node], dataset_type: str):
if len(nodes) == 0:
return {}
indicator = self.tree_train_head.make_indicator_for_prediction_on_nodes(nodes, dataset_type)
return indicator
def predict_on_tree(self, tree: Tree, indicator: Dict[int, Dict[str, np.ndarray]], dataset_type: str):
if dataset_type == "train":
actor_to_block_map = self.dataset_info.actor_to_block_map
elif dataset_type == "val":
actor_to_block_map = self.val_dataset_info.actor_to_block_map
elif dataset_type == "test":
            actor_to_block_map = self.test_dataset_info.actor_to_block_map
        else:
            raise ValueError(f"Dataset type {dataset_type} is not valid, supported types are 'train', 'val' and 'test'.")
prediction = self.tree_train_head.predict_on_tree(tree, indicator, actor_to_block_map)
return prediction
def predict_on_boosting_tree(self, boosting_tree: BoostingTree, indicator: Dict[int, Dict[str, np.ndarray]], dataset_type: str):
if dataset_type == "train":
actor_to_block_map = self.dataset_info.actor_to_block_map
elif dataset_type == "val":
actor_to_block_map = self.val_dataset_info.actor_to_block_map
elif dataset_type == "test":
            actor_to_block_map = self.test_dataset_info.actor_to_block_map
        else:
            raise ValueError(f"Dataset type {dataset_type} is not valid, supported types are 'train', 'val' and 'test'.")
prediction = self.tree_train_head.predict_on_boosting_tree(boosting_tree, indicator, actor_to_block_map)
return prediction
if __name__ == "__main__":
import time
from pandas.testing import assert_frame_equal
# path = ['/root/dataset/fate_dataset/fake_guest.csv',
# '/root/dataset/fate_dataset/fake_guest.csv']
path = ['/root/dataset/fate_dataset/fake_guest.csv']
# a = XgbRayHeadMaster(path, atomic_row_size_per_cpu_core=3300, is_centralized=True, file_type='csv') # 3300)
a = XgbRayHeadMaster()
a.scatter_data(path,
dataset_type='train',
has_id=True,
has_label=True,
missing_values=[np.nan],
atomic_row_size_per_cpu_core=5000,
is_centralized=True,
file_type='csv')
print(a.dataset_info.actor_to_block_map)
res = a.nunique()
print(res)
res = a.nunique([2, 5, 10, 1, 8])
print(res)
print(res.to_numpy())
print('ok')
a.set_cat_features(['x2', 'x3', 'x9'])
split_points = a.xgb_binning(num_bins=16)
print(split_points)
# a.set_loss_func('BCEWithLogitsLoss')
print(a.dataset_info.block_to_actor_map)
rows = sum([shape[0] for shape in a.dataset_info.shape])
# indices = None
# columns = None
# # grad = 0.5 - a.dataset_info.label
# grad = np.arange(len(a.dataset_info.label))
# hess = grad * (1 - grad)
# a.new_big_feature(indices,
# columns,
# grad,
# hess,
# None)
indices = np.array([1, 2, 3, 4, 800, 900, 8000, 9000, 13000, 13001, 3300*6+1000, 3300*6+1100])
columns = None
# grad = 0.5 - a.dataset_info.label
grad = np.arange(len(a.dataset_info.label))
hess = grad * (1 - grad)
grad = grad[indices]
hess = hess[indices]
a.new_big_feature(indices,
columns,
grad,
hess,
None)
indices = np.array([1, 2, 3, 800, 900, 8000, 9000, 13000, 13001, 3300*6+1000, 3300*6+1100])
batch_cols = None
# a.calc_hist_for_node(indices, None)
start = time.time()
hist1 = a.calc_hist_for_node(indices, 10)
print(time.time() - start)
print(hist1)
start = time.time()
hist2 = a.calc_hist_for_node(indices, None)
print(time.time() - start)
# print(hist2)
for k in hist1:
assert_frame_equal(hist1[k], hist2[k])
# xfl_grad xfl_hess
# sum sum
# x2
# 3 1.0 0.0
# 5 3.0 -6.0
# 10 1.0 0.0
# 13 2.0 -2.0
# 15 1708.0 -1448306.0 -----
# test_case
# for statistic_df
# import pandas as pd
# df = pd.read_csv(path[0], index_col=0)
# train_features = df.iloc[:, 1:].astype(np.float32)
# # df[['x2', 'x3', 'x9']] = df[['x2', 'x3', 'x9']].astype('category')
# out = train_features['x2'].value_counts().sort_index()
# print(out)
# print(out.shape)
# assert (out == res['x2'][0]).all()
# a, b = train_features['x1'].min(), train_features['x1'].max()
# print(a, b)
# assert a == res['x1'][0][0]
# assert b == res['x1'][0][1]
# class A():
# def __init__(self, a) -> None:
# self.a = a
# class B(A):
# def __init__(self, a) -> None:
# super().__init__(a)
# self.b = 1
# class C(A):
# def __init__(self, a) -> None:
# super().__init__(a)
# self.c = 1
# class D(B, C):
# def __init__(self, a) -> None:
# super().__init__(a+1)
# self.d = 1
# d = D(2)
# print(d.a) | 14,763 | 39.119565 | 132 | py |
XFL | XFL-master/python/algorithm/core/tree_ray/dataset_info.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Tuple, Dict
import numpy as np
from ray.actor import ActorHandle
class RayDatasetInfo:
def __init__(self):
self.feature_names: List[str] = None
self.label_name: str = None
self.cat_names: List[str] = None
        self.shape: List[Tuple[int, int]] = None
self.label: np.ndarray = None
self.indices: np.ndarray = None
self.actor_to_block_map: Dict[ActorHandle, int] = {}
self.block_to_actor_map: Dict[int, ActorHandle] = {}
        self.blocks_shape: Dict[int, Tuple[int, int]] = {}
self.split_points: Dict[str, list] = {}
self.big_feature_names: List[str] = None
def to_dict(self):
res = {
"feature_names": self.feature_names,
"label_name": self.label_name,
"shape": self.shape,
"actor_to_block_map": self.actor_to_block_map,
"block_to_actor_map": self.block_to_actor_map,
"blocks_shape": self.blocks_shape
}
return res
def to_tidy_dict(self):
res = {
"shape": self.shape,
"actor_to_block_map": self.actor_to_block_map,
"blocks_shape": self.blocks_shape
}
return res
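# A minimal usage sketch (hypothetical values, for illustration only):
#   info = RayDatasetInfo()
#   info.shape = [(10000, 31)]  # one csv file: 10000 rows, 31 columns
#   info.blocks_shape = {0: (5000, 31), 1: (5000, 31)}
#   info.actor_to_block_map = {actor_a: [0], actor_b: [1]}  # actor_a/actor_b are ActorHandles
#   info.block_to_actor_map = {0: actor_a, 1: actor_b}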
| 1,863 | 32.890909 | 74 | py |
XFL | XFL-master/python/algorithm/core/tree_ray/xgb_actor.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
from typing import Dict, List, Tuple, Optional, Union
import numpy as np
import pandas as pd
import ray
from algorithm.core.tree_ray.big_feature import Feature
from algorithm.core.tree.xgboost_loss import XGBLoss
from algorithm.core.tree.tree_structure import Tree, BoostingTree, Node
from algorithm.core.paillier_acceleration import embed, unpack
from algorithm.core.tree.gain_calc import cal_cat_rank, cal_gain, cal_weight
from common.crypto.paillier.paillier import Paillier, PaillierContext
"""
Note: type annotations in the Actors below are for readability; at runtime these objects are ActorHandles.
"""
class XgbBaseActor:
def __init__(self):
self.features: Dict[int, pd.DataFrame] = {}
self.label: Dict[int, Optional[np.ndarray]] = {}
self.loss_method: Optional[str] = None
self.loss_func: XGBLoss = None
self.boosting_tree: Optional[BoostingTree] = None
self.val_features: Dict[int, pd.DataFrame] = {}
self.val_label: Dict[int, np.ndarray] = {}
self.test_features: Dict[int, pd.DataFrame] = {}
self.big_feature: Dict[int, Feature] = {}
self.node_big_feature: Dict[str, Dict[int, Feature]] = {}
self.split_points: Dict[str, list] = {}
        self.split_point_bin_map: Dict[str, Dict[float, int]] = {}
self.paillier_context: PaillierContext = None
self.cat_names: List[str] = []
self.cat_smooth: float = None
self.lambda_: float = None
self.actor_id = ray.get_runtime_context().get_actor_id()
self.node_id = ray.get_runtime_context().get_node_id()
def report_actor_id(self):
return self.actor_id
def report_node_id(self):
return self.node_id
class RayCentralCsvActor:
def __init__(self):
super().__init__()
@classmethod
def recv_data(cls,
data: list, # [int, pd.DataFrame]
has_label: bool,
missing_values: List[float]):
features: Dict[int, pd.DataFrame] = {}
label: Dict[int, Optional[np.ndarray]] = {}
if has_label:
features[data[0]] = data[1].iloc[:, 1:]
label[data[0]] = data[1].iloc[:, 0].to_numpy()
else:
features[data[0]] = data[1]
label[data[0]] = None
if missing_values != []:
# features[data[0]].replace({k: 0 for k in missing_values}, inplace=True)
features[data[0]] = features[data[0]].replace({k: 0 for k in missing_values})
return features, label
class XgbDataFrameActor(XgbBaseActor):
def __init__(self):
super().__init__()
def unique(self, cols: Optional[List[Union[bool, int]]] = None):
res = None
# def f(x: pd.Series):
# out = np.unique(np.concatenate([x.iloc[0], res[x.name].iloc[0]]))
# return [out]
        # the commented-out implementation here saves memory, but is much slower
# for _, df in self.features.items():
# ################################################################################################
# # Do not try to use lambda x: pd.unique(x)!
# # Return a numpy.ndarray is dangerous in apply body, when you are not sure whether
# # the returned array length are the same or not, it will result in different format
# # of dataframes.
# ################################################################################################
# if cols is not None:
# df = df.iloc[:, cols]
# unique_df = df.apply(lambda x: [np.unique(x.to_numpy())])
# if res is None:
# res = unique_df
# else:
# res = unique_df.apply(f)
# Another version for cols=None, consumes more memory
res = []
for _, df in self.features.items():
if cols is None:
res.append(df)
else:
res.append(df[cols])
if res == []:
return None
else:
res = pd.concat(res).apply(lambda x: [np.unique(x.to_numpy())])
return res
def set_cat_features(self, names: List[str]):
self.cat_names = names
# for _, features in self.features.items():
# features[names] = features[names].astype('category')
return
def set_split_points(self, split_points: Dict[str, list]):
self.split_points = split_points
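        # Illustrative example (hypothetical values): for a continuous feature,
        # split_points {'x1': [0.5, 1.3]} yields the map {'x1': {0.5: 0, 1.3: 1}};
        # for a categorical feature, a list entry groups several raw values into
        # one shared bin.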
for feature_name in split_points:
self.split_point_bin_map[feature_name] = {}
for bin, split_point in enumerate(split_points[feature_name]):
if isinstance(split_point, list):
for v in split_point:
self.split_point_bin_map[feature_name][v] = bin
else:
self.split_point_bin_map[feature_name][split_point] = bin
return
def xgb_binning_phase1(self):
def f(x: pd.Series):
if x.name in self.cat_names:
return [x.value_counts()]
else:
return [(x.min(), x.max())]
# if x.dtypes == 'category':
# return [x.value_counts()]
# else:
# return [(x.min(), x.max())]
res = None
def g(x: pd.Series):
""" 1. x -- |(int, int)|
2. x -- |pd.Series|
"""
if isinstance(x.iloc[0], pd.Series):
y = pd.merge(x.iloc[0], res[x.name].iloc[0], how='outer', left_index=True, right_index=True).fillna(0)
counted_values = y[x.iloc[0].name+'_x'] + y[x.iloc[0].name+'_y']
counted_values.rename(x.iloc[0].name, inplace=True)
return [counted_values]
else:
a, b = x.iloc[0]
c, d = res[x.name].iloc[0]
min_v = min(a, c)
max_v = max(b, d)
return [(min_v, max_v)]
for _, features in self.features.items():
out = features.apply(f)
if res is None:
res = out
else:
res = out.apply(g)
return res
def xgb_binning_phase2(self,
num_bins: int,
split_points_df: pd.DataFrame):
if num_bins <= 256:
dtype = np.uint8
elif num_bins <= 2 ** 16:
dtype = np.uint16
else:
dtype = np.uint32
def f(x: pd.Series):
"""1. Categorial_1 -- |object|
# 2. Categorial_2 -- |np.int64|
2. Continuous -- |np.float64|
"""
x = x.iloc[0]
if x.dtype == object:
if isinstance(x[-1], list):
value_map = {v: i for i, v in enumerate(x[:-1])}
value_map.update({v: len(x)-1 for v in x[-1]})
else:
value_map = {v: i for i, v in enumerate(x)}
return [value_map]
else:
bins = [-float('inf')] + x.tolist() + [float('inf')]
return [bins]
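        # Sketch of f's output (hypothetical values): categorical split points
        # ['a', 'b', ['c', 'd']] map to {'a': 0, 'b': 1, 'c': 2, 'd': 2}; continuous
        # split points [0.5, 1.3] become the cut bins [-inf, 0.5, 1.3, inf],
        # i.e. len(bins) - 1 = 3 buckets.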
split_points_df = split_points_df.apply(f)
def g(x: pd.Series):
binning_info = split_points_df[x.name].iloc[0]
if isinstance(binning_info, dict):
codes = x.map(binning_info)
else:
codes = pd.cut(x, bins=binning_info, labels=range(len(binning_info)-1))
return codes
for block_idx, features in self.features.items():
self.features[block_idx] = features.apply(g).astype(dtype)
return
class XgbTrainActor(XgbBaseActor):
def __init__(self):
super().__init__()
def recv_all_trees(self, boosting_tree: BoostingTree):
self.boosting_tree = boosting_tree
def recv_latest_tree(self, tree: Tree, lr: float, max_depth: int):
self.boosting_tree.append(tree, lr, max_depth)
def sync_config(self,
paillier_context: PaillierContext,
cat_smooth: float,
lambda_: float):
self.paillier_context = paillier_context
self.cat_smooth = cat_smooth
self.lambda_ = lambda_
def update_big_feature(self,
indices: Dict[int, Optional[np.ndarray]],
columns: Optional[List[str]],
grad: Optional[Dict[int, np.ndarray]],
hess: Optional[Dict[int, np.ndarray]],
grad_hess: Optional[Dict[int, np.ndarray]],
create_new: bool):
# For label trainer, new a big_feature directly.
# For trainer, the grad, hess or grad_hess are supposed to be ciphertext, so update a potion one time.
if create_new:
self.big_feature = {}
gc.collect()
for block_id, features in self.features.items():
if indices is not None and block_id not in indices:
# This block is not used because of sampling
continue
if grad_hess is None:
self.big_feature[block_id] = Feature.create(values=features,
indices=None if indices is None else indices[block_id],
columns=columns,
grad=grad[block_id],
hess=hess[block_id],
grad_hess=None)
else:
self.big_feature[block_id] = Feature.create(values=features,
indices=None if indices is None else indices[block_id],
columns=columns,
grad=None,
hess=None,
grad_hess=grad_hess[block_id])
return
def cal_hist_for_node(self,
node_id: str,
packed: bool,
calc_count: bool,
indices: Optional[Dict[int, np.ndarray]],
col_section: Optional[Tuple[int, int]]):
""" Calculate hist for this node_big_feature on selected feature columns.
        Note: Categorical feature hist is not sorted here.
Args:
node_id (str): node's id in a tree.
packed (bool): if true, calc hist of column 'xfl_grad_hess', else, calc hist of columns 'xfl_grad' and 'xfl_hess'
indices (Optional[Dict[int, np.ndarray]]): selected sample indices of the node.
if indices is None, create a new self.node_big_feature equals to self.big_feature.
col_section (Optional[Tuple[int, int]]): a section for feature columns on the node_big_feature.
"""
if len(self.big_feature.keys()) == 0:
return None
if node_id not in self.node_big_feature:
if indices is None:
self.node_big_feature[node_id] = self.big_feature
else:
self.node_big_feature[node_id] = {}
for block_idx in indices:
feature = self.big_feature[block_idx]
self.node_big_feature[node_id][block_idx] = feature.slice_by_indices(indices[block_idx])
node_big_feature = self.node_big_feature[node_id]
# hist_dict: Dict[int, Dict[str, pd.DataFrame]] = {}
first_feature_col = 1 if packed else 2
if col_section is None:
columns = node_big_feature[list(node_big_feature.keys())[0]].data.columns.tolist()[first_feature_col:] # for grad and hess
else:
columns = node_big_feature[
list(node_big_feature.keys())[0]].data.columns.tolist()[col_section[0]+first_feature_col:col_section[1]+first_feature_col]
agg_arg = {'sum', 'count'} if calc_count else {'sum'}
# num_samples_sum = sum([feature.data.shape[0] for block_id, feature in node_big_feature.items()])
# agg_feature = pd.DataFrame(columns=node_big_feature[list(node_big_feature.keys())[0]].data.columns,
# index=range(num_samples_sum))
agg_feature = pd.concat([feature.data for feature in node_big_feature.values()])
        hist: Dict[str, pd.DataFrame] = {name: None for name in columns}
for name in columns:
if not packed:
hist[name] = agg_feature.groupby([name], observed=True)[['xfl_grad', 'xfl_hess']].agg(agg_arg)
else:
hist[name] = agg_feature.groupby([name], observed=True)[['xfl_grad_hess']].agg(agg_arg)
# for block_idx, feature in node_big_feature.items():
# hist_dict[block_idx] = {}
# for name in columns:
# if not packed:
# res = feature.data.groupby([name], observed=True)[['xfl_grad', 'xfl_hess']].agg(agg_arg)
# else:
# res = feature.data.groupby([name], observed=True)[['xfl_grad_hess']].agg(agg_arg)
# hist_dict[block_idx][name] = res
# hist: Dict[str: pd.DataFrame] = {name: None for name in columns}
# for col_name in hist:
# hist_list = [hist_dict[block_id][col_name] for block_id in hist_dict]
# if len(hist_list) == 1:
# hist[col_name] = hist_list[0]
# else:
# hist_df = pd.concat(hist_list)
# # numeric_only=False !!! Pandas bug.
# hist_df = hist_df.groupby(hist_df.index).sum(numeric_only=False)
# hist[col_name] = hist_df
return hist
def encrypt_grad_hess(self,
packed: bool,
block_id: int,
context: PaillierContext,
precision: Optional[float]):
if block_id not in self.big_feature:
# Actually not reach
if packed:
return np.array([])
else:
return [np.array([]), np.array([])]
big_feature_df: pd.DataFrame = self.big_feature[block_id].data
if packed:
grad = big_feature_df["xfl_grad"].to_numpy()
hess = big_feature_df["xfl_hess"].to_numpy()
data = embed([grad, hess], interval=(1 << 128), precision=64)
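            # embed/unpack (imported above) pack grad and hess into one large
            # integer per sample, so a single Paillier encryption covers both;
            # this is why precision must be 0 in the encrypt call below.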
res = Paillier.encrypt(data=data,
context=context,
precision=0, # must be 0 if data is packed grad and hess
obfuscation=True,
num_cores=1)
res = Paillier.serialize(res, compression=False)
else:
data_grad = big_feature_df["xfl_grad"].to_numpy()
data_hess = big_feature_df["xfl_hess"].to_numpy()
data = [data_grad, data_hess]
res = []
for d in data:
out = Paillier.encrypt(data=d,
context=context,
                                       precision=precision,  # caller-specified precision; grad and hess are encrypted separately here
obfuscation=True,
num_cores=1)
res.append(out)
res = [Paillier.serialize(i, compression=False) for i in res]
return res
def filter_sample_index(self,
node_id: str,
feature_name: str,
condition: Union[int, List[int]]):
"""
Args:
node_id (str): node id
feature_name (str): feature name
condition (Union[int, List[int]]): if is cat feature, condition is List[int], else is int.
"""
if node_id not in self.node_big_feature.keys():
# No data in this actor
return {}
sample_index: Dict[int, list] = {}
for block_id, feature in self.node_big_feature[node_id].items():
# if feature.data[feature_name].dtype == 'category':
if feature_name in self.cat_names:
filter = feature.data[feature_name].isin(condition)
else:
filter = feature.data[feature_name] <= condition
if len(feature.data[filter]) != 0:
sample_index[block_id] = feature.data[filter].index.astype('int').tolist()
return sample_index
def free_node_big_feature(self, node_id: str):
if node_id in self.node_big_feature:
del self.node_big_feature[node_id]
gc.collect()
return
def merge_hist(self, hist_list_dict: Dict[str, List[pd.DataFrame]]):
out_hist_dict: Dict[str, pd.DataFrame] = {}
for col_name in hist_list_dict:
hist_df = pd.concat(hist_list_dict[col_name])
# numeric_only=False !!! Pandas bug.
hist_df = hist_df.groupby(hist_df.index).sum(numeric_only=False)
out_hist_dict[col_name] = hist_df
return out_hist_dict
def calc_split_info(self,
is_remote: bool,
hist_dict: Dict[str, pd.DataFrame],
cat_names: Optional[List[str]]):
def f(x):
y = Paillier.decrypt(self.paillier_context, x, num_cores=1, out_origin=True)
z = unpack(y, num=2)
return z
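        # f decrypts one packed ciphertext and unpacks it back into the
        # (grad_sum, hess_sum) pair embedded on the label trainer's side.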
hint_split_info = {
'max_gain': -float('inf'),
"feature_name": None, # fake name for remote party
'split_bin': None, # fake bin for remote party
# "is_category": None,
"left_cat": None, # fake cat for remote party
"left_weight": None,
"right_weight": None,
'num_left_sample': None,
'num_right_sample': None
}
if not is_remote and cat_names is None:
cat_names = self.cat_names
for feature_name, feature_hist in hist_dict.items():
if len(feature_hist) <= 1:
# no split for this feature
continue
if is_remote and self.paillier_context:
if ('xfl_grad_hess', 'sum') in feature_hist.columns:
feature_hist[('xfl_grad_hess', 'sum')] = feature_hist[('xfl_grad_hess', 'sum')].apply(f)
grad_hess_ndarray = np.array(feature_hist[('xfl_grad_hess', 'sum')].to_list()).astype(np.float32)
count = feature_hist[('xfl_grad_hess', 'count')].to_numpy()
feature_hist = pd.DataFrame(
np.concatenate([grad_hess_ndarray, count[:, np.newaxis]], axis=1),
index=feature_hist.index,
columns=pd.MultiIndex.from_tuples([('xfl_grad', 'sum'), ('xfl_hess', 'sum'), ('xfl_grad', 'count')])
)
else:
feature_hist[[('xfl_grad', 'sum'), ('xfl_hess', 'sum')]] = \
feature_hist[[('xfl_grad', 'sum'), ('xfl_hess', 'sum')]].apply(lambda x: Paillier.decrypt(self.paillier_context, x, num_cores=1, out_origin=False))
is_category = feature_name in cat_names
if is_category:
cat_rank = cal_cat_rank(feature_hist[('xfl_grad', 'sum')],
feature_hist[('xfl_hess', 'sum')],
self.cat_smooth)
cat_rank.sort_values(inplace=True)
feature_hist = feature_hist.loc[cat_rank.index]
feature_hist = feature_hist.cumsum(axis=0)
feature_hist.rename(columns={"sum": "cum_sum", "count": "cum_count"}, inplace=True)
cum_grad = feature_hist[('xfl_grad', 'cum_sum')].to_numpy()
cum_hess = feature_hist[('xfl_hess', 'cum_sum')].to_numpy()
gains = cal_gain(cum_grad, cum_hess, self.lambda_)
max_gain_index = np.argmax(gains)
feature_max_gain = gains[max_gain_index]
if feature_max_gain > hint_split_info['max_gain']:
count_hist = feature_hist[('xfl_grad', 'cum_count')]
num_left_sample = count_hist.iloc[max_gain_index]
num_right_sample = count_hist.iloc[-1] - count_hist.iloc[max_gain_index]
if is_category:
left_cat = feature_hist.index.to_list()[:max_gain_index + 1]
split_bin = None
else:
left_cat = None
# convert to global index of split points of this feature, only for continuous feature
split_bin = int(feature_hist.index[max_gain_index])
left_weight = cal_weight(cum_grad[max_gain_index],
cum_hess[max_gain_index],
self.lambda_)
right_weight = cal_weight(cum_grad[-1] - cum_grad[max_gain_index],
cum_hess[-1] - cum_hess[max_gain_index],
self.lambda_)
hint_split_info['max_gain'] = feature_max_gain
hint_split_info['feature_name'] = feature_name
hint_split_info['split_bin'] = split_bin
hint_split_info['left_cat'] = left_cat
hint_split_info['is_category'] = is_category
hint_split_info['left_weight'] = left_weight
hint_split_info['right_weight'] = right_weight
hint_split_info['num_left_sample'] = num_left_sample
hint_split_info['num_right_sample'] = num_right_sample
return hint_split_info
def make_indicator_for_prediction_on_tree(self, tree: Tree, local_party_id: str, dataset_type: str):
if dataset_type == "train":
dataset = self.features
elif dataset_type == "val":
dataset = self.val_features
elif dataset_type == 'test':
dataset = self.test_features
else:
            raise ValueError(f"Dataset type {dataset_type} is not valid, supported types are 'train', 'val' and 'test'.")
# Dict[node_id, Dict[block_id, indicator]]
indicator: Dict[str, Dict[int, np.ndarray]] = {}
for node_id, node in tree.nodes.items():
if not node.is_leaf and node.split_info.owner_id == local_party_id:
indicator[node_id] = {}
feature_name = node.split_info.feature_name
if node.split_info.is_category:
if dataset_type == 'train':
left_cat = list(set([self.split_point_bin_map[feature_name][v] for v in node.split_info.left_cat]))
else:
left_cat = node.split_info.left_cat
for block_id, features in dataset.items():
data = features[feature_name].to_numpy()
indicator[node_id][block_id] = np.isin(data, left_cat)
else:
if dataset_type == 'train':
split_point = self.split_point_bin_map[feature_name][node.split_info.split_point]
for block_id, features in dataset.items():
data = features[feature_name].to_numpy()
indicator[node_id][block_id] = (data <= split_point)
else:
split_point = node.split_info.split_point
for block_id, features in dataset.items():
data = features[feature_name].to_numpy()
indicator[node_id][block_id] = (data <= split_point)
# Dict[node_id, Dict[block_id, indicator]] -> Dict[block_id, Dict[node_id, indicator]]
out_indicator: Dict[int, Dict[str, np.ndarray]] = {}
for node_id in indicator:
for block_id, data in indicator[node_id].items():
if block_id not in out_indicator:
out_indicator[block_id] = {}
out_indicator[block_id][node_id] = data
return out_indicator
def make_indicator_for_prediction_on_boosting_tree(self, boosting_tree: BoostingTree, local_party_id: str, dataset_type: str):
if dataset_type == "train":
dataset = self.features
elif dataset_type == "val":
dataset = self.val_features
elif dataset_type == 'test':
dataset = self.test_features
else:
            raise ValueError(f"Dataset type {dataset_type} is not valid, supported types are 'train', 'val' and 'test'.")
# Dict[node_id, Dict[block_id, indicator]]
indicator: Dict[str, Dict[int, np.ndarray]] = {}
for tree in boosting_tree.trees:
for node_id, node in tree.nodes.items():
if not node.is_leaf and node.split_info.owner_id == local_party_id:
indicator[node_id] = {}
feature_name = node.split_info.feature_name
if node.split_info.is_category:
if dataset_type == 'train':
left_cat = list(set([self.split_point_bin_map[feature_name][v] for v in node.split_info.left_cat]))
else:
left_cat = node.split_info.left_cat
for block_id, features in dataset.items():
data = features[feature_name].to_numpy()
indicator[node_id][block_id] = np.isin(data, left_cat)
else:
if dataset_type == 'train':
split_point = self.split_point_bin_map[feature_name][node.split_info.split_point]
for block_id, features in dataset.items():
data = features[feature_name].to_numpy()
indicator[node_id][block_id] = (data <= split_point)
else:
split_point = node.split_info.split_point
for block_id, features in dataset.items():
data = features[feature_name].to_numpy()
indicator[node_id][block_id] = (data <= split_point)
# Dict[node_id, Dict[block_id, indicator]] -> Dict[block_id, Dict[node_id, indicator]]
out_indicator: Dict[int, Dict[str, np.ndarray]] = {}
for node_id in indicator:
for block_id, data in indicator[node_id].items():
if block_id not in out_indicator:
out_indicator[block_id] = {}
out_indicator[block_id][node_id] = data
return out_indicator
def make_indicator_for_prediction_on_nodes(self, nodes: Dict[str, Node], dataset_type: str):
if dataset_type == "train":
dataset = self.features
elif dataset_type == "val":
dataset = self.val_features
elif dataset_type == 'test':
dataset = self.test_features
else:
            raise ValueError(f"Dataset type {dataset_type} is not valid, supported types are 'train', 'val' and 'test'.")
# Dict[node_id, Dict[block_id, indicator]]
indicator: Dict[str, Dict[int, np.ndarray]] = {}
for node_id, node in nodes.items():
indicator[node_id] = {}
feature_name = node.split_info.feature_name
if node.split_info.is_category:
if dataset_type == 'train':
left_cat = list(set([self.split_point_bin_map[feature_name][v] for v in node.split_info.left_cat]))
else:
left_cat = node.split_info.left_cat
for block_id, features in dataset.items():
data = features[feature_name].to_numpy()
indicator[node_id][block_id] = np.isin(data, left_cat)
else:
if dataset_type == 'train':
split_point = self.split_point_bin_map[feature_name][node.split_info.split_point]
for block_id, features in dataset.items():
data = features[feature_name].to_numpy()
indicator[node_id][block_id] = (data <= split_point)
else:
split_point = node.split_info.split_point
for block_id, features in dataset.items():
data = features[feature_name].to_numpy()
indicator[node_id][block_id] = (data <= split_point)
# Dict[node_id, Dict[block_id, indicator]] -> Dict[block_id, Dict[node_id, indicator]]
out_indicator: Dict[int, Dict[str, np.ndarray]] = {}
for node_id in indicator:
for block_id, data in indicator[node_id].items():
if block_id not in out_indicator:
out_indicator[block_id] = {}
out_indicator[block_id][node_id] = data
return out_indicator
def _gen_prediction(self, tree: Tree, indicator: Dict[str, np.ndarray]):
num_samples = list(indicator.values())[0].shape[0]
prediction = np.zeros((num_samples,), dtype=np.float32)
depth = 0
sample_in_node = {}
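        # Level-order routing: indicator[node.id][i] == 1 sends sample i to the
        # left child, 0 to the right; leaves write their weight into prediction.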
while True:
node_list = tree.search_nodes(depth)
if not node_list:
break
for node in node_list:
if node.is_leaf:
prediction[sample_in_node[node.id]] = node.weight
else:
if depth == 0:
sample_in_node[node.left_node_id] = np.where(indicator[node.id] == 1)[0]
sample_in_node[node.right_node_id] = np.where(indicator[node.id] == 0)[0]
else:
sample_in_node[node.left_node_id] = np.intersect1d(
sample_in_node[node.id], np.where(indicator[node.id] == 1)[0])
sample_in_node[node.right_node_id] = np.intersect1d(
sample_in_node[node.id], np.where(indicator[node.id] == 0)[0])
depth += 1
return prediction
def predict_on_tree(self, tree: Tree, indicator: Dict[int, Dict[str, np.ndarray]]):
prediction: Dict[int, np.ndarray] = {}
for block_id, indicator_dict in indicator.items():
prediction[block_id] = self._gen_prediction(tree, indicator_dict)
return prediction
def predict_on_boosting_tree(self, boosting_tree: BoostingTree, indicator: Dict[int, Dict[str, np.ndarray]]):
prediction: Dict[int, np.ndarray] = {}
for tree_idx, tree in enumerate(boosting_tree.trees):
for block_id, indicator_dict in indicator.items():
p = self._gen_prediction(tree, indicator_dict)
if block_id not in prediction:
prediction[block_id] = p * boosting_tree.lr[tree_idx]
else:
prediction[block_id] += p * boosting_tree.lr[tree_idx]
return prediction
@ray.remote(num_cpus=1)
class XgbActor(XgbDataFrameActor, XgbTrainActor):
def __init__(self):
super().__init__()
def recv_data(self,
data,
file_type: str,
is_centralized: bool,
dataset_type: str,
has_label: bool,
missing_values: List[float]):
if is_centralized:
if file_type == 'csv':
if dataset_type == 'train':
features, label = RayCentralCsvActor.recv_data(data,
has_label,
missing_values)
self.features.update(features)
self.label.update(label)
elif dataset_type == 'val':
val_features, val_label = RayCentralCsvActor.recv_data(data,
has_label,
missing_values)
self.val_features.update(val_features)
self.val_label.update(val_label)
else:
test_features, test_label = RayCentralCsvActor.recv_data(data,
has_label,
missing_values)
self.test_features.update(test_features)
# self.test_label.update(test_label)
else:
raise NotImplementedError
else:
raise NotImplementedError
return
| 34,789 | 43.375 | 171 | py |
XFL | XFL-master/python/algorithm/core/tree_ray/dataloader_head.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Union
from algorithm.core.tree_ray.xgb_actor import XgbActor
from algorithm.core.tree_ray.csv_scatter import scatter_csv_data
class XgbDataLoaderHead:
def __init__(self):
super().__init__()
@classmethod
def scatter_data(cls,
path: Union[str, list[str]],
dataset_type: str,
ray_actors: list[XgbActor],
has_id: bool = True,
has_label: bool = True,
missing_values: Union[float, List[float]] = [],
atomic_row_size_per_cpu_core: int = 5000,
is_centralized: bool = True,
file_type: str = 'csv'):
if is_centralized:
if file_type == 'csv':
dataset_info = \
scatter_csv_data(path,
dataset_type,
ray_actors,
has_id,
has_label,
missing_values,
atomic_row_size_per_cpu_core)
else:
raise NotImplementedError
else:
            raise NotImplementedError
return dataset_info
| 1,923 | 36 | 74 | py |
XFL | XFL-master/python/algorithm/core/tree_ray/assign_ray_input.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Dict
import numpy as np
from ray.actor import ActorHandle
from .dataset_info import RayDatasetInfo
def gen_actor_indices(indices: np.ndarray,
dataset_info: RayDatasetInfo):
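    # Maps global sample indices to {actor: {block_id: selected indices}} so each
    # Ray actor only receives rows for blocks it actually holds. A sketch
    # (hypothetical): indices [1, 5001] with two 5000-row blocks on two actors
    # yields {actor_a: {0: array([1])}, actor_b: {1: array([5001])}}.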
if indices is None:
# out_indices_dict = {k: None for k in dataset_info.actor_to_block_map}
out_indices_dict = {}
for actor, blocks in dataset_info.actor_to_block_map.items():
if len(blocks) != 0:
out_indices_dict[actor] = None
else:
rows = sum([shape[0] for shape in dataset_info.shape])
bool_indices = np.zeros((rows,), dtype='bool')
bool_indices[indices] = True
indices_dict: Dict[int, np.ndarray] = {}
out_indices_dict: Dict[ActorHandle, Dict[int, np.ndarray]] = {}
start_row = 0
for block_id, shape in dataset_info.blocks_shape.items():
end_row = start_row + shape[0]
idx = np.where(bool_indices[start_row: end_row])[0]
if len(idx) == 0:
# Avoid the block where no sample is selected
pass
else:
indices_dict[block_id] = idx + start_row
start_row = end_row
for actor, block_id_list in dataset_info.actor_to_block_map.items():
idx = {k: indices_dict[k] for k in block_id_list if k in indices_dict}
if idx != {}:
# Avoid the actor if no data is selected for this actor
out_indices_dict[actor] = idx
return out_indices_dict
def gen_actor_input(indices: Optional[np.ndarray],
grad: Optional[np.ndarray],
hess: Optional[np.ndarray],
grad_hess: Optional[np.ndarray],
dataset_info: RayDatasetInfo) -> list:
""" Generate indices, grad, hess, grad_hess for each actor.
Only one of the grad_hess and (grad, hess) is used, depending on whether grad_hess is None.
The size of grad, hess or grad_hess is equal to the size of indices.
Args:
indices (Optional[np.ndarray]): row indices selected for a tree node, the values should in ascend order.
grad (Optional[np.ndarray]): grad
hess (Optional[np.ndarray]): hess
grad_hess (Optional[np.ndarray]): packed grad hess (ciphertext)
dataset_info (RayDatasetInfo): dataset info
Returns:
list: list of indices, grad, hess, grad_hess for each actor
"""
block_interval_map = {}
if indices is None:
# out_indices_dict = {k: None for k in dataset_info.actor_to_block_map}
out_indices_dict = {}
for actor, blocks in dataset_info.actor_to_block_map.items():
if len(blocks) != 0:
                out_indices_dict[actor] = {block_id: None for block_id in blocks}  # None selects all rows of the block
start_idx = 0
for block_id, shape in dataset_info.blocks_shape.items():
end_idx = start_idx + shape[0]
block_interval_map[block_id] = [start_idx, end_idx]
start_idx = end_idx
else:
rows = sum([shape[0] for shape in dataset_info.shape])
bool_indices = np.zeros((rows,), dtype='bool')
bool_indices[indices] = True
indices_dict: Dict[int, np.ndarray] = {}
out_indices_dict: Dict[ActorHandle, Dict[int, np.ndarray]] = {}
start_row = 0
start_idx = 0
for block_id, shape in dataset_info.blocks_shape.items():
end_row = start_row + shape[0]
idx = np.where(bool_indices[start_row: end_row])[0]
if len(idx) == 0:
# Avoid the block where no sample is selected
pass
else:
indices_dict[block_id] = idx + start_row
end_idx = start_idx + len(idx)
block_interval_map[block_id] = [start_idx, end_idx]
start_idx = end_idx
start_row = end_row
for actor, block_id_list in dataset_info.actor_to_block_map.items():
idx = {k: indices_dict[k] for k in block_id_list if k in indices_dict}
if idx != {}:
# Avoid the actor if no data is selected for this actor
out_indices_dict[actor] = idx
if grad_hess is not None:
grad_hess_dict: Dict[int, np.ndarray] = {}
out_grad_hess_dict: Dict[ActorHandle, Dict[int, np.ndarray]] = {}
out_grad_dict = None
out_hess_dict = None
for block_id, (start_idx, end_idx) in block_interval_map.items():
grad_hess_dict[block_id] = grad_hess[start_idx: end_idx]
for actor, block_id_list in dataset_info.actor_to_block_map.items():
idx = {k: grad_hess_dict[k] for k in block_id_list if k in grad_hess_dict}
if idx != {}:
out_grad_hess_dict[actor] = idx
else:
grad_dict: Dict[int, np.ndarray] = {}
hess_dict: Dict[int, np.ndarray] = {}
out_grad_dict: Dict[ActorHandle, Dict[int, np.ndarray]] = {}
out_hess_dict: Dict[ActorHandle, Dict[int, np.ndarray]] = {}
out_grad_hess_dict = None
for block_id, (start_idx, end_idx) in block_interval_map.items():
grad_dict[block_id] = grad[start_idx: end_idx]
hess_dict[block_id] = hess[start_idx: end_idx]
for actor, block_id_list in dataset_info.actor_to_block_map.items():
idx_grad, idx_hess = {}, {}
for k in block_id_list:
if k in grad_dict:
idx_grad[k] = grad_dict[k]
idx_hess[k] = hess_dict[k]
if idx_grad != {}:
out_grad_dict[actor] = idx_grad
out_hess_dict[actor] = idx_hess
return out_indices_dict, out_grad_dict, out_hess_dict, out_grad_hess_dict | 6,458 | 40.403846 | 112 | py |
XFL | XFL-master/python/algorithm/core/tree_ray/tree_train_head.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
from typing import Optional, Dict, List, Union
import ray
from ray.actor import ActorHandle
import numpy as np
import pandas as pd
from algorithm.core.tree_ray.assign_ray_input import gen_actor_input
from algorithm.core.tree_ray.xgb_actor import XgbTrainActor
from algorithm.core.tree_ray.dataset_info import RayDatasetInfo
from algorithm.core.tree.tree_structure import BoostingTree, Tree, Node
from common.crypto.paillier.paillier import PaillierContext
class TreeTrainHead:
def __init__(self, ray_actors: list[XgbTrainActor], ray_tasks_num_returns: Optional[int] = None):
self.ray_actors = ray_actors
self.NONE_REF = ray.put(None)
self.TRUE_REF = ray.put(True)
self.FALSE_REF = ray.put(False)
self.ray_tasks_num_returns = ray_tasks_num_returns
def sync_all_trees(self, boosting_tree: BoostingTree):
boosting_tree_ref = ray.put(boosting_tree)
ray_tasks = []
for actor in self.ray_actors:
            ray_tasks.append(actor.recv_all_trees.remote(boosting_tree_ref))
# ray.get(ray_tasks)
return
def sync_latest_tree(self, tree: Tree, lr: float, max_depth: int):
tree_ref = ray.put(tree)
lr_ref = ray.put(lr)
        max_depth_ref = ray.put(max_depth)
        ray_tasks = []
        for actor in self.ray_actors:
            ray_tasks.append(actor.recv_latest_tree.remote(tree_ref, lr_ref, max_depth_ref))
# ray.get(ray_tasks)
return
def sync_config(self,
paillier_context: PaillierContext,
cat_smooth: float,
lambda_: float):
ray_tasks = []
for actor in self.ray_actors:
ray_tasks.append(actor.sync_config.remote(paillier_context,
cat_smooth,
lambda_))
# ray.get(ray_tasks)
return
def new_big_feature(self,
indices: Optional[np.ndarray],
columns: Optional[List[str]],
grad: Optional[np.ndarray],
hess: Optional[np.ndarray],
grad_hess: Optional[np.ndarray],
dataset_info: RayDatasetInfo):
indices, grad, hess, grad_hess = gen_actor_input(indices,
grad,
hess,
grad_hess,
dataset_info)
indices_ref = {k: ray.put(v) if v is not None else self.NONE_REF for k, v in indices.items()}
columns_ref = self.NONE_REF if columns is None else ray.put(columns)
grad_ref = self.NONE_REF if grad is None else {k: ray.put(v) for k, v in grad.items()}
hess_ref = self.NONE_REF if hess is None else {k: ray.put(v) for k, v in hess.items()}
grad_hess_ref = self.NONE_REF if grad_hess is None else {k: ray.put(v) for k, v in grad_hess.items()}
ray_tasks = []
for actor in indices_ref:
ray_tasks.append(actor.update_big_feature.remote(indices_ref[actor],
columns_ref,
grad_ref[actor] if isinstance(grad_ref, dict) else grad_ref,
hess_ref[actor] if isinstance(hess_ref, dict) else hess_ref,
grad_hess_ref[actor] if isinstance(grad_hess_ref, dict) else grad_hess_ref,
create_new=True))
# ray.get(ray_tasks)
out_indices = {}
out_grad = {}
out_hess = {}
out_grad_hess = {}
for actor in indices.keys():
out_indices.update(indices[actor])
if grad is not None:
out_grad.update(grad[actor])
if hess is not None:
out_hess.update(hess[actor])
if grad_hess is not None:
out_grad_hess.update(grad_hess[actor])
return out_indices, out_grad, out_hess, out_grad_hess
def gen_big_feature_updater(self,
block_to_actor_map: Dict[int, ActorHandle],
columns: Optional[List[str]]):
NONE_REF = self.NONE_REF
class BigFeatureUpdater:
def __init__(self, block_to_actor_map, columns):
self.block_to_actor_map = block_to_actor_map
self.columns_ref = NONE_REF if columns is None else ray.put(columns)
self.is_created = {}
def update(self,
indices: Dict[int, Optional[np.ndarray]],
grad: Optional[Dict[int, np.ndarray]],
hess: Optional[Dict[int, np.ndarray]],
grad_hess: Optional[Dict[int, np.ndarray]]):
ray_tasks = []
for block_id in indices.keys():
indices_ref = ray.put({block_id: indices[block_id]})
grad_ref = NONE_REF if grad is None else ray.put(grad)
hess_ref = NONE_REF if hess is None else ray.put(hess)
grad_hess_ref = NONE_REF if grad_hess is None else ray.put(grad_hess)
actor = self.block_to_actor_map[block_id]
ray_tasks.append(actor.update_big_feature.remote(indices_ref,
self.columns_ref,
grad_ref,
hess_ref,
grad_hess_ref,
                                                                     create_new=not self.is_created.get(actor, False)))  # note: create_new is tracked per actor, not per block
self.is_created[actor] = True
# ray.get(ray_tasks)
return
big_feature_updater = BigFeatureUpdater(block_to_actor_map, columns)
return big_feature_updater
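    # A hypothetical usage sketch (names are illustrative, not from the source):
    #   updater = tree_train_head.gen_big_feature_updater(block_to_actor_map, None)
    #   updater.update({0: idx0}, grad={0: g0}, hess={0: h0}, grad_hess=None)
    # The first update() seen by an actor recreates its big_feature; subsequent
    # updates for other blocks on the same actor append to it.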
def _merge_hist(self, hist_dict_list: List[Dict[str, pd.DataFrame]]):
hist_list_dict = {}
for i, hist_dict in enumerate(hist_dict_list):
if hist_dict is None:
continue
for col_name, hist in hist_dict.items():
if col_name not in hist_list_dict:
hist_list_dict[col_name] = []
hist_list_dict[col_name].append(hist_dict_list[i][col_name])
num = min(len(self.ray_actors), len(hist_list_dict))
ray_hist_dict_list = [{} for _ in range(num)]
for i, feature_name in enumerate(hist_list_dict.keys()):
ray_hist_dict_list[i % len(ray_hist_dict_list)].update({feature_name: hist_list_dict[feature_name]})
ray_hist_dict_list = [ray.put(item) for item in ray_hist_dict_list]
random.shuffle(ray_hist_dict_list)
ray_tasks = []
for i in range(num):
actor = self.ray_actors[i]
ray_tasks.append(actor.merge_hist.remote(ray_hist_dict_list[i]))
if self.ray_tasks_num_returns is None:
ray_tasks_num_returns = len(ray_tasks)
else:
ray_tasks_num_returns = min(self.ray_tasks_num_returns, len(ray_tasks))
out_hist_dict: Dict[str, pd.DataFrame] = {}
while len(ray_tasks):
done_task, ray_tasks = ray.wait(ray_tasks, num_returns=ray_tasks_num_returns)
out_list = ray.get(done_task)
for out in out_list:
out_hist_dict.update(out)
return out_hist_dict
def gen_node_hist_iterator(self,
node_id: str,
packed: bool,
calc_count: bool,
indices: Dict[ActorHandle, Dict[int, np.ndarray]],
num_features: Optional[int],
step: Optional[int]):
node_id_ref = ray.put(node_id)
packed_ref = self.TRUE_REF if packed else self.FALSE_REF
calc_count_ref = self.TRUE_REF if calc_count else self.FALSE_REF
indices_ref = {k: ray.put(v) if v is not None else self.NONE_REF for k, v in indices.items()}
def gather_hist(node_id_ref, indices_ref, col_section_ref):
ray_tasks = []
for actor, indices_dict_ref in indices_ref.items():
ray_tasks.append(actor.cal_hist_for_node.remote(node_id_ref,
packed_ref,
calc_count_ref,
indices_dict_ref,
col_section_ref))
hist_dict_list = ray.get(ray_tasks)
hist_dict: Dict[str, pd.DataFrame] = self._merge_hist(hist_dict_list)
return hist_dict
if step is None:
hist = gather_hist(node_id_ref, indices_ref, self.NONE_REF)
return [(hist, 0)]
else:
boundary_points = list(range(0, num_features, step)) + [num_features]
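            # e.g. (hypothetical) num_features=10, step=4 -> [0, 4, 8, 10],
            # i.e. the column sections [0, 4), [4, 8) and [8, 10).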
class HistIterator:
def __iter__(self):
                    self.boundary_iterator = zip(boundary_points[:-1], boundary_points[1:])
self.round_left = len(boundary_points[:-1])
return self
                def __next__(self):
                    # StopIteration from the exhausted zip propagates naturally
                    a, b = next(self.boundary_iterator)
                    col_section_ref = ray.put([a, b])
                    hist = gather_hist(node_id_ref,
                                       indices_ref,
                                       col_section_ref)
                    self.round_left -= 1
                    return hist, self.round_left
return HistIterator()
def encrypt_grad_hess(self,
packed: bool,
block_to_actor_map: Dict[int, ActorHandle],
context: PaillierContext,
precision: Optional[float],
lazy_return: bool):
packed_ref = self.TRUE_REF if packed else self.FALSE_REF
context_ref = ray.put(context)
precision_ref = ray.put(precision)
ray_tasks = []
task_block_idx_map = {}
for block_id in block_to_actor_map:
block_id_ref = ray.put(block_id)
ray_tasks.append(block_to_actor_map[block_id].encrypt_grad_hess.remote(packed_ref,
block_id_ref,
context_ref,
precision_ref))
task_block_idx_map[ray_tasks[-1]] = block_id
if lazy_return:
class EncryptedGradHess:
def __iter__(self):
self.ray_tasks = ray_tasks
self.task_block_idx_map = task_block_idx_map
return self
def __next__(self):
if len(self.ray_tasks) != 0:
done_task, self.ray_tasks = ray.wait(self.ray_tasks)
block_idx = self.task_block_idx_map[done_task[0]]
out = ray.get(done_task[0])
return block_idx, out, len(self.ray_tasks)
else:
raise StopIteration
return EncryptedGradHess()
else:
res = ray.get(ray_tasks)
return [(None, res, 0)]
def filter_sample_index(self,
node_id: str,
feature_name: str,
condition: Union[int, List[int]]):
node_id_ref = ray.put(node_id)
feature_name_ref = ray.put(feature_name)
condition_ref = ray.put(condition)
ray_tasks = []
for actor in self.ray_actors:
ray_tasks.append(actor.filter_sample_index.remote(node_id_ref,
feature_name_ref,
condition_ref))
sample_index: Dict[str, list] = {}
# import time
# start = time.time()
while len(ray_tasks):
if self.ray_tasks_num_returns is None:
ray_tasks_num_returns = len(ray_tasks)
else:
ray_tasks_num_returns = min(self.ray_tasks_num_returns, len(ray_tasks))
done_task, ray_tasks = ray.wait(ray_tasks, num_returns=ray_tasks_num_returns)
out_list = ray.get(done_task)
for out in out_list:
sample_index.update(out)
# done_task, ray_tasks = ray.wait(ray_tasks)
# out = ray.get(done_task[0])
# sample_index.update(out)
# print(time.time() - start, '----')
return sample_index
def free_node_big_feature(self, node_id: str):
ray_tasks = []
for actor in self.ray_actors:
ray_tasks.append(actor.free_node_big_feature.remote(node_id))
# ray.get(ray_tasks)
return
def calc_split_info(self,
is_remote: bool,
hist_dict: Dict[str, pd.DataFrame],
cat_names: List[str],
lazy_return: bool):
is_remote_ref = self.TRUE_REF if is_remote else self.FALSE_REF
cat_names_ref = ray.put(cat_names) if cat_names is not None else self.NONE_REF
num = min(len(self.ray_actors), len(hist_dict))
hist_dict_list = [{} for _ in range(num)]
for i, feature_name in enumerate(hist_dict.keys()):
hist_dict_list[i % len(hist_dict_list)].update({feature_name: hist_dict[feature_name]})
hist_dict_ref_list = [ray.put(item) for item in hist_dict_list]
random.shuffle(hist_dict_ref_list)
ray_tasks = []
for i in range(num):
actor = self.ray_actors[i]
ray_tasks.append(actor.calc_split_info.remote(is_remote_ref,
hist_dict_ref_list[i],
cat_names_ref))
if lazy_return:
ray_tasks_num_returns = self.ray_tasks_num_returns
class HintSplitInfoIterator:
def __iter__(self):
self.ray_tasks = ray_tasks
self.ray_tasks_num_returns = ray_tasks_num_returns
return self
def __next__(self):
if len(self.ray_tasks) != 0:
if self.ray_tasks_num_returns is None:
ray_tasks_num_returns = len(self.ray_tasks)
else:
ray_tasks_num_returns = min(self.ray_tasks_num_returns, len(self.ray_tasks))
done_task, self.ray_tasks = ray.wait(self.ray_tasks, num_returns=ray_tasks_num_returns)
hint_split_info_list = ray.get(done_task)
best_hint_split_info = hint_split_info_list[0]
for hint_split_info in hint_split_info_list[1:]:
if hint_split_info['max_gain'] > best_hint_split_info['max_gain']:
best_hint_split_info = hint_split_info
# done_task, self.ray_tasks = ray.wait(self.ray_tasks)
# hint_split_info = ray.get(done_task[0])
# return hint_split_info, len(self.ray_tasks)
return best_hint_split_info, len(self.ray_tasks)
else:
raise StopIteration
return HintSplitInfoIterator()
else:
hint_split_info = ray.get(ray_tasks)
return [(hint_split_info, 0)]
def make_indicator_for_prediction_on_tree(self, tree: Tree, local_party_id: str, dataset_type: str):
tree_ref = ray.put(tree)
local_party_id_ref = ray.put(local_party_id)
dataset_type_ref = ray.put(dataset_type)
ray_tasks = []
for actor in self.ray_actors:
ray_tasks.append(actor.make_indicator_for_prediction_on_tree.remote(tree_ref,
local_party_id_ref,
dataset_type_ref))
indicator: Dict[int, Dict[str, np.ndarray]] = {}
while len(ray_tasks):
if self.ray_tasks_num_returns is None:
ray_tasks_num_returns = len(ray_tasks)
else:
ray_tasks_num_returns = min(self.ray_tasks_num_returns, len(ray_tasks))
done_task, ray_tasks = ray.wait(ray_tasks, num_returns=ray_tasks_num_returns)
out_list = ray.get(done_task)
for out in out_list:
indicator.update(out)
# done_task, ray_tasks = ray.wait(ray_tasks)
# out = ray.get(done_task[0])
# indicator.update(out)
return indicator
def make_indicator_for_prediction_on_boosting_tree(self, boosting_tree: BoostingTree, local_party_id: str, dataset_type: str):
boosting_tree_ref = ray.put(boosting_tree)
local_party_id_ref = ray.put(local_party_id)
dataset_type_ref = ray.put(dataset_type)
ray_tasks = []
for actor in self.ray_actors:
ray_tasks.append(actor.make_indicator_for_prediction_on_boosting_tree.remote(boosting_tree_ref,
local_party_id_ref,
dataset_type_ref))
indicator: Dict[int, Dict[str, np.ndarray]] = {}
while len(ray_tasks):
if self.ray_tasks_num_returns is None:
ray_tasks_num_returns = len(ray_tasks)
else:
ray_tasks_num_returns = min(self.ray_tasks_num_returns, len(ray_tasks))
done_task, ray_tasks = ray.wait(ray_tasks, num_returns=ray_tasks_num_returns)
out_list = ray.get(done_task)
for out in out_list:
indicator.update(out)
# done_task, ray_tasks = ray.wait(ray_tasks)
# out = ray.get(done_task[0])
# indicator.update(out)
return indicator
def make_indicator_for_prediction_on_nodes(self, nodes: Dict[str, Node], dataset_type: str):
nodes_ref = ray.put(nodes)
dataset_type_ref = ray.put(dataset_type)
ray_tasks = []
for actor in self.ray_actors:
ray_tasks.append(actor.make_indicator_for_prediction_on_nodes.remote(nodes_ref,
dataset_type_ref))
indicator: Dict[int, Dict[str, np.ndarray]] = {}
while len(ray_tasks):
if self.ray_tasks_num_returns is None:
ray_tasks_num_returns = len(ray_tasks)
else:
ray_tasks_num_returns = min(self.ray_tasks_num_returns, len(ray_tasks))
done_task, ray_tasks = ray.wait(ray_tasks, num_returns=ray_tasks_num_returns)
out_list = ray.get(done_task)
for out in out_list:
indicator.update(out)
# done_task, ray_tasks = ray.wait(ray_tasks)
# out = ray.get(done_task[0])
# indicator.update(out)
return indicator
def predict_on_tree(self,
tree: Tree,
indicator: Dict[int, Dict[str, np.ndarray]],
actor_to_block_map: Dict[ActorHandle, int]):
tree_ref = ray.put(tree)
ray_tasks = []
for actor in self.ray_actors:
indicator_ref = ray.put({block_id: indicator[block_id] for block_id in actor_to_block_map[actor]})
ray_tasks.append(actor.predict_on_tree.remote(tree_ref, indicator_ref))
prediction: Dict[int, np.ndarray] = {}
while len(ray_tasks):
if self.ray_tasks_num_returns is None:
ray_tasks_num_returns = len(ray_tasks)
else:
ray_tasks_num_returns = min(self.ray_tasks_num_returns, len(ray_tasks))
done_task, ray_tasks = ray.wait(ray_tasks, num_returns=ray_tasks_num_returns)
out_list = ray.get(done_task)
for out in out_list:
prediction.update(out)
# done_task, ray_tasks = ray.wait(ray_tasks)
# out = ray.get(done_task[0])
# prediction.update(out)
prediction = np.concatenate([prediction[i] for i in range(len(prediction))])
return prediction
def predict_on_boosting_tree(self,
boosting_tree: BoostingTree,
indicator: Dict[int, Dict[str, np.ndarray]],
actor_to_block_map: Dict[ActorHandle, int]):
boosting_tree_ref = ray.put(boosting_tree)
ray_tasks = []
for actor in self.ray_actors:
indicator_ref = ray.put({block_id: indicator[block_id] for block_id in actor_to_block_map[actor]})
ray_tasks.append(actor.predict_on_boosting_tree.remote(boosting_tree_ref, indicator_ref))
prediction: Dict[int, np.ndarray] = {}
while len(ray_tasks):
if self.ray_tasks_num_returns is None:
ray_tasks_num_returns = len(ray_tasks)
else:
ray_tasks_num_returns = min(self.ray_tasks_num_returns, len(ray_tasks))
done_task, ray_tasks = ray.wait(ray_tasks, num_returns=ray_tasks_num_returns)
out_list = ray.get(done_task)
for out in out_list:
prediction.update(out)
# done_task, ray_tasks = ray.wait(ray_tasks)
# out = ray.get(done_task[0])
# prediction.update(out)
prediction = np.concatenate([prediction[i] for i in range(len(prediction))])
return prediction
| 24,055 | 45.084291 | 149 | py |
XFL | XFL-master/python/algorithm/core/tree_ray/__init__.py | 0 | 0 | 0 | py | |
XFL | XFL-master/python/algorithm/core/tree_ray/csv_scatter.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import random
from typing import List, Union
import numpy as np
import pandas as pd
import ray
from algorithm.core.tree_ray.xgb_actor import XgbActor
from algorithm.core.tree_ray.dataset_info import RayDatasetInfo
def scatter_csv_data(path_list: list[str],
dataset_type: str,
ray_actors: list[XgbActor],
has_id: bool = True,
has_label: bool = True,
missing_values: Union[float, List[float]] = [],
atomic_row_size_per_cpu_core: int = 5000):
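    # Scheme (as implemented below): rows from all csv files are re-chunked into
    # fixed-size blocks of atomic_row_size_per_cpu_core rows; incomplete_df
    # carries a partially filled block across file boundaries, full blocks are
    # shipped one-per-actor once every actor can receive one, and any leftover
    # blocks at the end go to randomly chosen actors.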
shape = []
col_names = []
feature_names = []
obj_ref_list = []
label_name = None
num_cols = 0
global_rows = 0
block_count = 0
incomplete_df = None
index_col = 0 if has_id else False
dataset_info = RayDatasetInfo()
dataset_info.actor_to_block_map = {id: [] for id in ray_actors}
labels: List[np.ndarray] = []
indices: List[np.ndarray] = []
for file_index, path in enumerate(path_list):
iter_count = 0
num_rows = 0
if incomplete_df is None:
df_iterators = [
pd.read_csv(path,
index_col=index_col,
chunksize=atomic_row_size_per_cpu_core)
]
else:
skiprows = atomic_row_size_per_cpu_core - len(incomplete_df)
df_iterators = [
pd.read_csv(path,
index_col=index_col,
chunksize=skiprows),
]
try:
df_iterator2 = pd.read_csv(path,
index_col=index_col,
skiprows=range(1, skiprows+1),
chunksize=atomic_row_size_per_cpu_core)
df_iterators.append(df_iterator2)
except pd.errors.EmptyDataError:
pass
df_index = 0
file_type_ref = ray.put('csv')
is_centralized_ref = ray.put(True)
dataset_type_ref = ray.put(dataset_type)
has_label_ref = ray.put(has_label)
missing_values_ref = ray.put(missing_values)
while True:
try:
if len(df_iterators) == 2 and iter_count == 1:
df_index = 1
df: pd.DataFrame = next(df_iterators[df_index]).astype(np.float32)
if df.shape[0] == 0:
break
num_rows += len(df)
global_rows += len(df)
if has_label:
labels.append(df.iloc[:, 0].to_numpy())
indices.append(df.index.to_numpy())
if iter_count == 0:
if file_index == 0:
num_cols = len(df.columns)
col_names = df.columns.tolist()
if has_label:
feature_names = col_names[1:]
label_name = col_names[0]
else:
feature_names = col_names
label_name = None
else:
if len(df.columns) != num_cols:
raise ValueError(
f"Number of columns of files provided are not the same. {num_cols} != {len(df.columns)}")
if df.columns.tolist() != col_names:
raise ValueError(
"Column names of files provided are not the same.")
if incomplete_df is not None:
df = pd.concat([incomplete_df, df], axis=0)
if len(df) == atomic_row_size_per_cpu_core:
block_index = block_count
sample_start_index = block_index * atomic_row_size_per_cpu_core
sample_indices = np.arange(sample_start_index, sample_start_index + atomic_row_size_per_cpu_core)
df.set_index(sample_indices, inplace=True)
obj_ref_list.append(ray.put([block_index, df]))
dataset_info.blocks_shape[block_index] = df.shape
incomplete_df = None
block_count += 1
else:
incomplete_df = df
if len(obj_ref_list) == len(ray_actors):
ray_tasks = []
for i, actor in enumerate(ray_actors):
ref = obj_ref_list[i]
ray_tasks.append(actor.recv_data.remote(ref,
file_type_ref,
is_centralized_ref,
dataset_type_ref,
has_label_ref,
missing_values_ref))
block_index = block_count - len(obj_ref_list) + i
dataset_info.actor_to_block_map[ray_actors[i]].append(block_index)
ray.get(ray_tasks)
obj_ref_list = []
gc.collect()
except StopIteration:
break
iter_count += 1
shape.append((num_rows, num_cols))
if incomplete_df is not None:
block_index = block_count
sample_start_index = block_index * atomic_row_size_per_cpu_core
sample_indices = np.arange(sample_start_index, sample_start_index + len(incomplete_df))
incomplete_df.set_index(sample_indices, inplace=True)
obj_ref_list.append(ray.put([block_index, incomplete_df]))
dataset_info.blocks_shape[block_index] = incomplete_df.shape
block_count += 1
if len(obj_ref_list) != 0:
ray_tasks = []
selected_actor_index = random.sample(
range(len(ray_actors)), len(obj_ref_list))
for i in range(len(obj_ref_list)):
ref = obj_ref_list[i]
actor_index = selected_actor_index[i]
actor = ray_actors[actor_index]
ray_tasks.append(actor.recv_data.remote(ref,
file_type_ref,
is_centralized_ref,
dataset_type_ref,
has_label_ref,
missing_values_ref))
block_index = block_count - len(obj_ref_list) + i
dataset_info.actor_to_block_map[ray_actors[actor_index]].append(block_index)
ray.get(ray_tasks)
dataset_info.feature_names = feature_names
dataset_info.label_name = label_name
dataset_info.shape = shape
gc.collect()
for actor_id, block_ids in dataset_info.actor_to_block_map.items():
for id in block_ids:
dataset_info.block_to_actor_map[id] = actor_id
if has_label:
dataset_info.label = np.concatenate(labels)
dataset_info.indices = np.concatenate(indices)
return dataset_info
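# Illustrative sketch (an editor addition, not part of the original module):
# scatter_csv_data needs a live Ray cluster and XgbActor handles, so this only
# rehearses the block-partitioning arithmetic it relies on: rows from all files
# are cut into blocks of atomic_row_size_per_cpu_core rows, and block k is
# re-indexed with the global sample indices [k * size, k * size + len(block)).
# It assumes the module's own pandas/numpy imports (pd, np) from the file header.
if __name__ == "__main__":
    _block_size = 3  # stands in for atomic_row_size_per_cpu_core
    _df = pd.DataFrame({"y": range(8)})
    for _k, _start in enumerate(range(0, len(_df), _block_size)):
        _block = _df.iloc[_start:_start + _block_size]
        # Re-index each block with its global sample indices, exactly as
        # scatter_csv_data does before ray.put-ing [block_index, df].
        _block = _block.set_index(np.arange(_start, _start + len(_block)))
        print(_k, _block.index.tolist())  # 0 [0, 1, 2] / 1 [3, 4, 5] / 2 [6, 7]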
| 8,156 | 38.028708 | 121 | py |
XFL | XFL-master/python/algorithm/core/tree_ray/dataframe_head.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Dict, Union, Optional
import numpy as np
import pandas as pd
import ray
from algorithm.core.tree_ray.dataset_info import RayDatasetInfo
class XgbDataFrameHead:
def __init__(self,
dataset_info: RayDatasetInfo,
ray_tasks_num_returns: Optional[int] = None):
self.dataset_info = dataset_info
self.ray_tasks_num_returns = ray_tasks_num_returns
self.ray_actors = list(dataset_info.actor_to_block_map.keys())
self.NONE_REF = ray.put(None)
self.LATEST_REF = ray.put('latest')
self.ALL_REF = ray.put('all')
def nunique(self, cols: Optional[List[Union[bool, int]]] = None):
if cols is None:
cols_ref = self.NONE_REF
else:
cols_ref = ray.put(cols)
ray_tasks = []
for actor in self.ray_actors:
ray_tasks.append(actor.unique.remote(cols_ref))
res = None
while len(ray_tasks):
if self.ray_tasks_num_returns is None:
ray_tasks_num_returns = len(ray_tasks)
else:
ray_tasks_num_returns = min(self.ray_tasks_num_returns, len(ray_tasks))
done_task, ray_tasks = ray.wait(ray_tasks, num_returns=ray_tasks_num_returns)
unique_df_list = ray.get(done_task)
            unique_df_list = [x for x in unique_df_list if x is not None]
if len(unique_df_list) == 0:
continue
if res is None:
res = pd.concat(unique_df_list)
else:
res = pd.concat([res] + unique_df_list)
res = res.apply(lambda x: [np.unique(np.concatenate(x.tolist()))])
res = res.apply(lambda x: len(x[0]))
return res
def set_cat_features(self, names: List[str]):
names_ref = ray.put(names)
ray_tasks = []
for actor in self.ray_actors:
ray_tasks.append(actor.set_cat_features.remote(names_ref))
# ray.get(ray_tasks)
def set_split_points(self, split_points: Dict[str, list]):
self.dataset_info.split_points = split_points
split_points_ref = ray.put(split_points)
ray_tasks = []
for actor in self.ray_actors:
ray_tasks.append(actor.set_split_points.remote(split_points_ref))
# ray.get(ray_tasks)
def xgb_binning(self, num_bins: int):
ray_tasks = []
for actor in self.ray_actors:
ray_tasks.append(actor.xgb_binning_phase1.remote())
def g(x: pd.Series):
""" 1. x -- |(int, int), (int, int)|
2. x -- |pd.Series, pd.Series|
"""
x = x.tolist()
if isinstance(x[0], pd.Series):
# merge only supports two dataframe
y = pd.merge(x[0], x[1], how='outer', left_index=True, right_index=True).fillna(0)
counted_values = y[x[0].name+'_x'] + y[x[0].name+'_y']
counted_values.name = x[0].name
return [counted_values]
else:
a, b = x[0], x[1]
min_v = min(a[0], b[0])
max_v = max(a[1], b[1])
return [(min_v, max_v)]
        # Get min/max for continuous features and value counts for categorical features
statistic_df = None
while len(ray_tasks):
done_task, ray_tasks = ray.wait(ray_tasks)
out = ray.get(done_task[0])
if statistic_df is None:
statistic_df = out
else:
statistic_df = pd.concat([statistic_df, out])
statistic_df = statistic_df.apply(g)
# min & max -- |(min, max)|
# value_count -- |pd.Series|
# Calc split points or values in bins
def f(x: pd.Series):
x = x.iloc[0]
if isinstance(x, pd.Series): # Category
if x.shape[0] > num_bins:
x.sort_values(ascending=False, inplace=True)
values = x.index.values.tolist()
values_unique = values[:num_bins - 1]
values_group = values[num_bins - 1:]
uniques = np.array(values_unique + [values_group], dtype=object)
return [uniques]
else:
return [x.index.values.astype(object)] # [x.index.values.astype(np.int64)]
else:
min_v, max_v = x
split_points = np.linspace(min_v, max_v, num_bins + 1)[1:-1]
return [split_points]
split_points_df = statistic_df.apply(f)
# Call actors to apply split points
num_bins_ref = ray.put(num_bins)
split_points_df_ref = ray.put(split_points_df)
ray_tasks = []
for actor in self.ray_actors:
ray_tasks.append(actor.xgb_binning_phase2.remote(num_bins_ref, split_points_df_ref))
ray.get(ray_tasks)
if sum(split_points_df.shape) == 0:
split_points = {}
else:
split_points = {
feature_name: split_points_df[feature_name].iloc[0].tolist() for feature_name in split_points_df.columns
}
return split_points
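# Illustrative worked example (an editor addition, not in the original source):
# the continuous branch of xgb_binning derives equal-width split points with
# np.linspace(min_v, max_v, num_bins + 1)[1:-1]. For min_v=0, max_v=10, num_bins=5:
#   >>> np.linspace(0, 10, 5 + 1)[1:-1]
#   array([2., 4., 6., 8.])
# i.e. four interior cut points delimiting five equal-width bins; categorical
# features instead keep their num_bins - 1 most frequent values and pool the rest.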
| 6,037 | 36.042945 | 120 | py |
XFL | XFL-master/python/algorithm/core/tree_ray/big_feature.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
import numpy as np
import pandas as pd
class Feature(object):
"""
Process column data.
"""
def __init__(self, data: pd.DataFrame):
self.data = data
@classmethod
def create(cls,
values: pd.DataFrame,
indices: Optional[np.ndarray] = None,
columns: Optional[list[str]] = None,
grad: Optional[np.ndarray] = None,
hess: Optional[np.ndarray] = None,
grad_hess: Optional[np.ndarray] = None):
        # Note: indices act on the index column.
if indices is not None and len(indices) == 0:
return None
if indices is None and columns is None:
selected_values = values
elif indices is None:
selected_values = values.loc[:, columns]
elif columns is None:
selected_values = values.loc[indices, :]
        else:
            selected_values = values.loc[indices, columns]
if indices is None:
indices = values.index
if grad_hess is not None:
# data = pd.concat([pd.DataFrame(grad_hess, columns=['xfl_grad_hess']).set_index(indices),
# selected_values], axis=1)
data = pd.DataFrame(columns=['xfl_grad_hess'] + selected_values.columns.to_list(),
index=indices)
data['xfl_grad_hess'] = grad_hess
data[selected_values.columns] = selected_values
else:
# data = pd.concat([pd.DataFrame(grad, columns=['xfl_grad']).set_index(indices),
# pd.DataFrame(hess, columns=['xfl_hess']).set_index(indices),
# selected_values], axis=1)
data = pd.DataFrame(columns=['xfl_grad', 'xfl_hess'] + selected_values.columns.to_list(),
index=indices)
data['xfl_grad'] = grad
data['xfl_hess'] = hess
data[selected_values.columns] = selected_values
return Feature(data)
def slice_by_indices(self, indices: Optional[np.ndarray]):
if indices is None:
# Note it is not a copy.
data = self.data
else:
data = self.data.loc[indices, :]
return Feature(data)
if __name__ == "__main__":
a = pd.DataFrame({
"a": [1, 2, 3, 4],
"b": [3, 4, 5, 10],
"c": [6, 11, 9, 10]
}).set_index(np.array([2, 7, 11, 12]))
grad = np.array([1, 2, 3])
hess = np.array([3, 2, 1])
indices = np.array([2, 11, 12])
# indices = np.array([])
columns = ['a', 'c']
f = Feature.create(a, indices, columns, grad, hess, None)
print(f.data)
print(f.slice_by_indices(np.array([11, 12])).data)
| 4,184 | 34.168067 | 102 | py |
XFL | XFL-master/python/algorithm/core/tree/feature_importance.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class FeatureImportance(object):
def __init__(self, importance_gain=0, importance_split=0, main_type="split"):
self.legal_type = ["split", "gain"]
assert main_type in self.legal_type, "illegal importance type {}".format(main_type)
self.importance_gain = importance_gain
self.importance_split = importance_split
self.main_type = main_type
def get(self):
if self.main_type == "split":
return self.importance_split
elif self.main_type == "gain":
return self.importance_gain
def add_gain(self, val):
self.importance_gain += val
def add_split(self, val):
self.importance_split += val
def __eq__(self, other):
if self.main_type == "split":
return self.importance_split == other.importance_split
elif self.main_type == "gain":
return self.importance_gain == other.importance_gain
def __lt__(self, other):
if self.main_type == "split":
return self.importance_split < other.importance_split
elif self.main_type == "gain":
return self.importance_gain < other.importance_gain
def __repr__(self):
if self.main_type == "gain":
return "importance: {}".format(self.importance_gain)
elif self.main_type == "split":
return "importance: {}".format(self.importance_split)
def __add__(self, other):
new_importance = FeatureImportance(main_type=self.main_type,
importance_gain=self.importance_gain + other.importance_gain,
importance_split=self.importance_split + other.importance_split)
return new_importance
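# Illustrative usage sketch (an editor addition, not in the original source):
if __name__ == "__main__":
    a = FeatureImportance(importance_gain=0.5, importance_split=2, main_type="split")
    b = FeatureImportance(importance_gain=1.5, importance_split=1, main_type="split")
    merged = a + b
    print(merged.get())  # 3: the 'split' channel accumulates split counts
    print(a < b)         # False: comparison also uses the 'split' channel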
| 2,352 | 38.881356 | 107 | py |
XFL | XFL-master/python/algorithm/core/tree/cat_param_parser.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
def parse_category_param(df: pd.DataFrame,
col_index: str = "",
col_names: list[str] = [],
max_num_value: int = 0,
col_index_type: str = 'inclusive',
col_names_type: str = 'inclusive',
max_num_value_type: str = 'union') -> list[int]:
""" Calculate column indexes and column names of category. The formulation is:
features that column indexes are in col_index if col_index_type is 'inclusive' or not in col_index if col_index_type is 'exclusive'.
union
features that column names are in col_names if col_names_type is 'inclusive' or not in col_names if col_names_type is 'exclusive'.
union if max_num_value_type is 'union' or intersect if max_num_value_type is 'intersection'
features that number of different values is less equal than max_num_value.
Args:
df (pd.DataFrame): input dataframe.
col_index (str): column index of features which are supposed to be (or not to be) a categorial feature. Defaults to "".
col_names (list[str]): column names of features which are supposed to be (or not to be) a categorical feature. Defaults to [].
max_num_value (int): if n <= max_num_value where n is the number of different values in a feature column, then the feature is supposed to be a category feature. Defalts to 0.
col_index_type (str, optional): support 'inclusive' and 'exclusive'. Defaults to 'inclusive'.
col_names_type (str, optional): support 'inclusive' and 'exclusive'. Defaults to 'inclusive'.
max_num_value_type (str, optional): support 'intersection' and 'union'. Defaults to 'union'.
Returns:
list[int]: list of categorial feature column indexes.
Note:
col_index is count from the first column of features, not the input table.
col_index support single value and slice. For example, a vaild form of col_index is "2, 4:8, -7, -10:-7", where "4:8" means "4,5,6,7",
vaild form of col_names is like ["wage", "age"].
"""
res = []
if col_index != "":
index1 = _parse_index(col_index, len(df.columns))
if col_index_type == 'inclusive':
res += index1
elif col_index_type == 'exclusive':
res += list(set(range(len(df.columns))) - set(index1))
else:
            raise ValueError(
                f"col_index_type {col_index_type} not valid, must be one of 'inclusive' and 'exclusive'.")
if col_names != []:
index2 = _parse_names(col_names, df.columns.to_list())
if col_names_type == 'inclusive':
res += index2
elif col_names_type == 'exclusive':
res += list(set(range(len(df.columns))) - set(index2))
else:
            raise ValueError(
                f"col_names_type {col_names_type} not valid, must be one of 'inclusive' and 'exclusive'.")
res = list(set(res))
if max_num_value > 0:
if max_num_value_type == "union":
num_unique = df.nunique().to_numpy()
index3 = list(np.where(num_unique <= max_num_value)[0])
res += index3
elif max_num_value_type == "intersection":
col_selection = [False for i in range(len(df.columns))]
for i in res:
col_selection[i] = True
df_category = df.iloc[:, col_selection]
num_unique = df_category.nunique().to_numpy()
index3 = list(np.where(num_unique <= max_num_value)[0])
res = list(map(lambda x: res[x], index3))
else:
            raise ValueError(
                f"max_num_value_type {max_num_value_type} not valid, must be one of 'union' and 'intersection'.")
res = list(set(res))
return res
def _parse_index(index: str, num_cols: int) -> list[int]:
    '''Parse an index string of the form "1, 3:5, 4, 8:11".'''
res = []
index_list = index.replace(' ', '').split(',')
for value in index_list:
if ':' in value:
left, right = value.split(':')
if left == "":
left = 0
if int(left) < 0:
left = min(max(0, num_cols + int(left)), num_cols)
if right == "":
right = num_cols
if int(right) < 0:
right = min(max(0, num_cols + int(right)), num_cols)
res += [i for i in range(int(left), int(right))]
else:
value = int(value)
            if abs(value) >= num_cols:
                raise ValueError(f"Column index {value} is out of range for the column size {num_cols}.")
if value < 0:
value += num_cols
res.append(value)
res = list(set(res))
return res
def _parse_names(names: list[str], valid_names: list[str]) -> list[int]:
res = []
name_list = [item.strip() for item in names]
for name in name_list:
try:
i = valid_names.index(name)
res.append(i)
except ValueError as e:
raise ValueError(f"Column name {name} not found: {e}")
res = list(set(res))
return res
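# Illustrative usage sketch (an editor addition, not in the original source):
if __name__ == "__main__":
    example_df = pd.DataFrame({"wage": [1.0, 2.0, 3.0, 4.0],
                               "age": [20, 30, 20, 30],
                               "sex": [0, 1, 0, 1]})
    # Column 0 ('wage') by index, union every column with <= 2 distinct values.
    print(sorted(parse_category_param(example_df, col_index="0", max_num_value=2)))  # [0, 1, 2]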
| 5,899 | 38.597315 | 182 | py |
XFL | XFL-master/python/algorithm/core/tree/gain_calc.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Union
import numpy as np
import pandas as pd
def cal_cat_rank(sum_grad: Union[pd.Series, np.ndarray],
sum_hess: Union[pd.Series, np.ndarray],
cat_smooth: float) -> Union[pd.Series, np.ndarray]:
return sum_grad / (sum_hess + cat_smooth)
def cal_gain(cum_grad: np.ndarray,
cum_hess: np.ndarray,
lambda_: float) -> np.ndarray:
if len(cum_grad) <= 1:
return np.array([-float('inf')], dtype=np.float32)
grad_left = cum_grad[:-1]
grad_right = cum_grad[-1] - grad_left
hess_left = cum_hess[:-1]
hess_right = cum_hess[-1] - hess_left
base_score = np.square(cum_grad[-1]) / (cum_hess[-1] + lambda_)
gain = np.square(grad_left) / (hess_left + lambda_) + \
np.square(grad_right) / (hess_right + lambda_) - \
base_score
return gain
# def cal_gain(cum_grad: np.ndarray,
# cum_hess: np.ndarray,
# lambda_: float,
# grad_missing: Optional[float] = None,
# hess_missing: Optional[float] = None) -> np.ndarray:
# if len(cum_grad) <= 1:
# return np.array([-float('inf')], dtype=np.float32)
# grad_left = cum_grad[:-1]
# grad_right = cum_grad[-1] - grad_left
# hess_left = cum_hess[:-1]
# hess_right = cum_hess[-1] - hess_left
# if grad_missing is None or hess_missing is None or (grad_missing == 0 and hess_missing == 0):
# base_score = np.square(cum_grad[-1]) / (cum_hess[-1] + lambda_)
# gain = np.square(grad_left) / (hess_left + lambda_) + \
# np.square(grad_right) / (hess_right + lambda_) - \
# base_score
# else:
# base_score = np.square(cum_grad[-1] + grad_missing) / (cum_hess[-1] + hess_missing + lambda_)
# gain_missing_on_left = np.square(grad_left + grad_missing) / (hess_left + hess_missing + lambda_) + \
# np.square(grad_right) / (hess_right + lambda_) - \
# base_score
# gain_missing_on_right = np.square(grad_left) / (hess_left + lambda_) + \
# np.square(grad_right + grad_missing) / (hess_right + hess_missing + lambda_) - \
# base_score
# gain = np.concatenate([gain_missing_on_left, gain_missing_on_right])
# return gain
def cal_weight(sum_grad: float,
sum_hess: float,
lambda_: float) -> float:
return -sum_grad / (sum_hess + lambda_)
class BestSplitInfo(object):
def __init__(self,
gain: float = -float('inf'),
feature_ower: str = '',
feature_index: int = 0,
feature_name: str = '',
is_category: bool = False,
split_point: Optional[float] = None,
left_cat: Optional[list] = None,
                 missing_value_on_left: Optional[bool] = None,
left_sample_index: Optional[np.ndarray] = None,
right_sample_index: Optional[np.ndarray] = None,
left_bin_weight: float = 0,
right_bin_weight: float = 0,
num_left_bin: Optional[int] = None,
num_right_bin: Optional[int] = None,
max_gain_index: Optional[int] = None):
self.gain = gain
self.feature_owner = feature_ower
self.feature_idx = feature_index
self.feature_name = feature_name
self.is_category = is_category
self.split_point = split_point
self.left_cat = left_cat
self.missing_value_on_left = missing_value_on_left
self.left_sample_index = left_sample_index
self.right_sample_index = right_sample_index
self.left_bin_weight = left_bin_weight
self.right_bin_weight = right_bin_weight
self.num_left_bin = num_left_bin
self.num_right_bin = num_right_bin
self.max_gain_index = max_gain_index
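# Illustrative worked example (an editor addition, not in the original source):
# cal_gain receives *cumulative* per-bin grad/hess sums whose last entry is the
# node total. With cum_grad=[1, 3, 6], cum_hess=[1, 2, 3] and lambda_=1:
#   base = 6^2 / (3 + 1) = 9
#   split after bin 0: 1^2/(1+1) + 5^2/(2+1) - 9 ~= -0.167
#   split after bin 1: 3^2/(2+1) + 3^2/(1+1) - 9  = -1.5
if __name__ == "__main__":
    print(cal_gain(np.array([1.0, 3.0, 6.0]), np.array([1.0, 2.0, 3.0]), lambda_=1.0))
    print(cal_weight(sum_grad=6.0, sum_hess=3.0, lambda_=1.0))  # -6 / (3 + 1) = -1.5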
| 4,676 | 39.318966 | 118 | py |
XFL | XFL-master/python/algorithm/core/tree/xgboost_loss.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
import numpy as np
import torch
from common.utils.constants import BCEWithLogitsLoss, MSELoss
from ..activation import sigmoid
def get_xgb_loss_inst(name: str, params: Optional[dict] = None):
name = name.lower()
if name == BCEWithLogitsLoss.lower():
return XGBBCEWithLogitsLoss(params)
    elif name == MSELoss.lower():
        return XGBMSELoss(params)
else:
raise NotImplementedError(f"Loss {name} not implemented.")
class XGBLoss(object):
def __init__(self, params: Optional[dict] = None):
self.name = 'Loss'
self.params = params
def cal_grad(self, y: np.ndarray, y_pred: np.ndarray, after_prediction: bool = True):
raise NotImplementedError("Method cal_grad not implemented.")
def cal_hess(self, y: np.ndarray, y_pred: np.ndarray, after_prediction: bool = True):
raise NotImplementedError("Method cal_hess not implemented.")
# def predict(raw_value: np.ndarray):
# raise NotImplemented("Method predict not implemented.")
def cal_loss(self, y: np.ndarray, y_pred: np.ndarray, after_prediction: bool = False):
raise NotImplementedError("Method cal_loss not implemented.")
class XGBBCEWithLogitsLoss(XGBLoss):
def __init__(self, params: Optional[dict] = None):
super().__init__(params)
self.name = BCEWithLogitsLoss
def cal_grad(self, y: np.ndarray, y_pred: np.ndarray, after_prediction: bool = True):
if not after_prediction:
y_pred = sigmoid(y_pred)
return y_pred - y
def cal_hess(self, y: np.ndarray, y_pred: np.ndarray, after_prediction: bool = True):
if not after_prediction:
y_pred = sigmoid(y_pred)
return y_pred * (1 - y_pred)
def predict(self, raw_value: np.ndarray):
return sigmoid(raw_value)
def cal_loss(self, y: np.ndarray, y_pred: np.ndarray, after_prediction: bool = False):
if not after_prediction:
loss_func = torch.nn.BCEWithLogitsLoss()
loss = loss_func(torch.tensor(y_pred), torch.tensor(y)).item()
else:
loss_func = torch.nn.BCELoss()
loss = loss_func(torch.tensor(y_pred), torch.tensor(y)).item()
return loss
# if not after_prediction:
# y_pred = sigmoid(y_pred)
# _loss = -y * np.log(y_pred) - (1 - y) * np.log(1 - y_pred)
# loss = np.average(_loss)
# return loss
class XGBMSELoss(XGBLoss):
def __init__(self, params: Optional[dict] = None):
super().__init__(params)
self.name = MSELoss
def cal_grad(self, y: np.ndarray, y_pred: np.ndarray):
return -2 * (y - y_pred)
def cal_hess(self, y: np.ndarray, y_pred: np.ndarray):
return 2
def predict(self, raw_value: np.ndarray):
return raw_value
def cal_loss(self, y: np.ndarray, y_pred: np.ndarray):
loss_func = torch.nn.MSELoss()
loss = loss_func(torch.tensor(y_pred), torch.tensor(y)).item()
# _loss = np.square(y - y_pred)
# loss = np.average(_loss)
return loss
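# Illustrative sanity check (an editor addition, not in the original source;
# it assumes the BCEWithLogitsLoss constant equals the string "BCEWithLogitsLoss").
# For logits z, BCE-with-logits gives grad = sigmoid(z) - y and hess = p * (1 - p).
if __name__ == "__main__":
    y = np.array([1.0, 0.0])
    logits = np.array([0.0, 0.0])  # sigmoid(0) = 0.5
    loss_inst = get_xgb_loss_inst("BCEWithLogitsLoss")
    print(loss_inst.cal_grad(y, logits, after_prediction=False))  # [-0.5  0.5]
    print(loss_inst.cal_hess(y, logits, after_prediction=False))  # [0.25 0.25]
    print(round(loss_inst.cal_loss(y, logits), 4))                # 0.6931 = ln(2)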
| 3,740 | 33.962617 | 90 | py |
XFL | XFL-master/python/algorithm/core/tree/goss.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
class Goss(object):
"""Gradient based one side sampling(GOSS) in LightGBM, obeyed to LightGBM original paper.
Link: https://papers.nips.cc/paper/2017/file/6449f44a102fde848669bdd9eb6b76fa-Paper.pdf
"""
def __init__(self, alpha: float, beta: float):
"""
Args:
alpha (float): sampling ratio of large gradient data.
beta (float): sampling ratio of small gradient data.
"""
        if not 0 <= alpha <= 1 or not 0 <= beta <= 1:
            raise ValueError(f"alpha {alpha} and beta {beta} should both be in [0, 1].")
        if not 0 < alpha + beta <= 1:
            raise ValueError(f"alpha {alpha} + beta {beta} should be in (0, 1].")
self.alpha = alpha
self.beta = beta
def sampling(self, g: np.ndarray) -> np.ndarray:
""" Generate sample index
Args:
g (np.ndarray): gradients list, 1-d array.
Returns:
np.ndarray: selected sample index
"""
# topN = a * len(i), randN = b * len(i)
top_n, rand_n = int(self.alpha * len(g)), int(self.beta * len(g))
# sorted = GetSortedIndices(abs(g))
sorter = np.argsort(abs(g))[::-1]
# topSet = sorted[1:topN]
top_set_idx = sorter[:top_n]
        # randSet = RandomPick(sorted[topN:len(I)], randN)
        rand_set_idx = np.random.choice(sorter[top_n:], rand_n, replace=False)
self.rand_set_idx = rand_set_idx
selected_idx = np.sort(np.concatenate([top_set_idx, rand_set_idx]))
if len(selected_idx) == 0:
raise ValueError("Length of selected sample is 0.")
return selected_idx
def update_gradients(self, g: np.ndarray, h: np.ndarray) -> None:
if len(self.rand_set_idx) == 0:
return
g[self.rand_set_idx] *= (1 - self.alpha) / self.beta
h[self.rand_set_idx] *= (1 - self.alpha) / self.beta
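# Illustrative usage sketch (an editor addition, not in the original source):
if __name__ == "__main__":
    demo_g = np.random.randn(100)
    demo_h = np.ones(100)
    goss = Goss(alpha=0.2, beta=0.1)
    selected = goss.sampling(demo_g)  # the 20 largest-|g| samples + 10 random others
    print(len(selected))              # 30
    goss.update_gradients(demo_g, demo_h)  # small-gradient rows rescaled by (1 - 0.2) / 0.1 = 8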
| 2,623 | 34.945205 | 94 | py |
XFL | XFL-master/python/algorithm/core/tree/tree_param.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, List
from algorithm.core.encryption_param import EncryptionParam
class EarlyStoppingParam(object):
pass
class LossParam(object):
def __init__(self, name):
self.name = name
class XGBTreeParam(object):
def __init__(self,
# task_type: str, # 'classification', 'regression'
loss_param: LossParam, # ["cross_entropy", "lse", "lae", "huber", "fair", "log_cosh", "tweedie"]
num_trees: int,
learning_rate: float,
gamma: float,
lambda_: float,
max_depth: int,
num_bins: int,
min_split_gain: float,
min_sample_split: int,
min_leaf_node: int,
                 feature_importance_type: str,  # 'split' (split count) or 'gain' (split gain)
run_goss: bool,
top_rate: float,
other_rate: float,
# validation_freqs: int,
metrics: List[str],
                 early_stopping_param: Optional[EarlyStoppingParam] = None,
encryption_param: Optional[EncryptionParam] = None,
subsample_feature_rate: float = 1.0,
missing_value: float = float('inf'),
max_num_cores: int = 9999,
col_batch: int = 128,
row_batch: int = 10000,
atomic_row_size_per_cpu_core: int = 10000,
ray_task_num_returns: Optional[int] = None,
ray_col_step: int = 20,
pack_grad_hess: bool = True,
batch_blocks_on_recv: int = 10,
# category feature params
cat_col_index: str = "",
cat_col_names: List[str] = [],
cat_max_num_value: int = 0,
cat_col_index_type: str = 'inclusive',
cat_col_names_type: str = 'inclusive',
cat_max_num_value_type: str = 'union',
cat_smooth: float = 0):
# self.task_type = task_type
self.loss_param = loss_param
self.num_trees = num_trees
self.learning_rate = learning_rate
self.gamma = gamma
self.lambda_ = lambda_
self.early_stopping_param = early_stopping_param
self.encryption_param = encryption_param
# single tree training
self.max_depth = max_depth
self.num_bins = num_bins
self.min_split_gain = min_split_gain
self.min_sample_split = min_sample_split
self.min_leaf_node = min_leaf_node
self.feature_importance_type = feature_importance_type
self.subsample_feature_rate = subsample_feature_rate
self.missing_value = missing_value
self.run_goss = run_goss
self.top_rate = top_rate
self.other_rate = other_rate
# validation
# self.validation_freqs = validation_freqs
self.metrics = metrics
# multiprocess
self.max_num_cores = max_num_cores
self.col_batch = col_batch
self.row_batch = row_batch
self.atomic_row_size_per_cpu_core = atomic_row_size_per_cpu_core
self.ray_task_num_returns = ray_task_num_returns
self.ray_col_step = ray_col_step
self.pack_grad_hess = pack_grad_hess
self.batch_blocks_on_recv = batch_blocks_on_recv
self.cat_col_index = cat_col_index
self.cat_col_names = cat_col_names
self.cat_max_num_value = cat_max_num_value
self.cat_col_index_type = cat_col_index_type
self.cat_col_names_type = cat_col_names_type
self.cat_max_num_value_type = cat_max_num_value_type
self.cat_smooth = cat_smooth
| 4,525 | 35.796748 | 118 | py |
XFL | XFL-master/python/algorithm/core/tree/tree_structure.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import string
from typing import Dict, List, Optional, Tuple
import numpy as np
from google.protobuf import json_format
from common.utils.logger import logger
from common.model.python.tree_model_pb2 import XGBoostModel, NodeModel
class SplitInfo(object):
def __init__(self,
owner_id: str,
feature_idx: Optional[int] = None,
feature_name: Optional[str] = None,
is_category: bool = False,
missing_value_on_left: Optional[bool] = True,
split_point: Optional[float] = None,
left_cat: Optional[List[str]] = None,
                 gain: float = 0):
self.owner_id = owner_id
self.feature_idx = feature_idx
self.feature_name = feature_name
self.is_category = is_category
self.missing_value_on_left = missing_value_on_left
self.split_point = split_point
self.left_cat = left_cat # when is category
self.gain = gain
@classmethod
    def from_dict(cls, data: dict):
split_info = SplitInfo(owner_id=data['owner_id'])
for k, v in data.items():
setattr(split_info, k, v)
return split_info
def to_dict(self):
res = {}
attribute_list = ["owner_id", "feature_idx", "feature_name", "is_category", "split_point", "left_cat"]
for name in attribute_list:
res[name] = getattr(self, name, None)
return res
# def to_min_dict(self):
# res = {}
# attribute_list = ["feature_idx", "is_category", "split_point", "left_cat"]
# for name in attribute_list:
# res[name] = getattr(self, name)
# return res
class Node(object):
def __init__(self,
id: str,
depth: int = -1,
sample_index: Optional[np.ndarray] = None,
left_node_id: Optional[str] = None,
right_node_id: Optional[str] = None,
parent_node_id: Optional[str] = None,
split_info: Optional[SplitInfo] = None,
is_leaf: bool = True,
weight: Optional[float] = None,
linkage: Optional[str] = None):
self.id = id
self.depth = depth
self.left_node_id = left_node_id
self.right_node_id = right_node_id
self.parent_node_id = parent_node_id
self.split_info = split_info
self.is_leaf = is_leaf
self.weight = weight
self.linkage = linkage
# for training
self.sample_index = sample_index
@classmethod
    def from_dict(cls, data: dict):
node = Node(id=data["id"])
for k, v in data.items():
if k == "split_info":
split_info = SplitInfo.from_dict(v) if v else None
setattr(node, k, split_info)
else:
setattr(node, k, v)
return node
def to_dict(self):
res = {}
attribute_list = ["id", "depth", "left_node_id", "right_node_id",
"split_info", "is_leaf", "weight", "linkage"]
for name in attribute_list:
if name == "split_info":
res[name] = getattr(self, name)
if res[name]:
res[name] = res[name].to_dict()
else:
res[name] = getattr(self, name)
return res
def to_min_dict(self):
res = {}
res["id"] = self.id
res["split_info"] = self.split_info.to_dict() if self.split_info else None
return res
def update_as_non_leaf(self,
split_info: SplitInfo,
left_node_id: str,
right_node_id: str):
self.split_info = split_info
self.is_leaf = False
self.left_node_id = left_node_id
self.right_node_id = right_node_id
def update_as_leaf(self, weight: float):
self.weight = weight
self.is_leaf = True
class Tree(object):
def __init__(self,
party_id: str,
tree_index: int,
root_node_id: Optional[str] = None,
                 nodes: Optional[Dict[str, Node]] = None):
        self.party_id = party_id
        self.tree_index = tree_index
        self.root_node_id = root_node_id
        self.nodes: Dict[str, Node] = {}  # important
        self.nodes.update(nodes or {})
if root_node_id is None:
self.root_node = Node(id=self._generate_id(), depth=0)
self.root_node_id = self.root_node.id
self.nodes[self.root_node.id] = self.root_node
else:
if self.root_node_id not in self.nodes:
raise ValueError(f"Tree root node id {self.root_node_id} not in nodes ids.")
else:
self.root_node = self.nodes[self.root_node_id]
@classmethod
def from_dict(cls, data: dict):
tree = Tree(party_id=data["party_id"], tree_index=data["tree_index"])
for k, v in data.items():
if k == "nodes":
                value = {node_id: Node.from_dict(node) for node_id, node in v.items()}
setattr(tree, k, value)
else:
setattr(tree, k, v)
return tree
def to_dict(self):
res = {}
attribute_list = ["party_id", "tree_index", "root_node_id", "nodes"]
for name in attribute_list:
if name == "nodes":
nodes_dict: dict[str, Node] = getattr(self, name)
res[name] = {k: v.to_dict() for k, v in nodes_dict.items()}
else:
res[name] = getattr(self, name)
return res
def clear_training_info(self):
for k, node in self.nodes.items():
node.sample_index = None
def check_node(self, node_id):
if node_id not in self.nodes:
return False
return True
def search_nodes(self, depth: int) -> List[Node]:
res = []
for node_id, node in self.nodes.items():
if node.depth == depth:
res.append(node)
return res
def _generate_id(self) -> str:
flag = True
while flag:
id = ''.join(random.sample(string.ascii_letters + string.digits, 16))
if id not in self.nodes:
flag = False
return '_'.join([str(self.tree_index), id])
def split(self,
node_id: str,
split_info: SplitInfo,
left_sample_index: List[int],
right_sample_index: List[int],
left_sample_weight: float,
right_sample_weight: float,
left_node_id: Optional[str] = None,
right_node_id: Optional[str] = None) -> Tuple[str, str]:
if not self.check_node(node_id):
logger.warning(f"Node_id {node_id} not valid, can't split")
return None, None
node = self.nodes[node_id]
node.split_info = split_info
node.is_leaf = False
left_child_node = Node(id=self._generate_id() if left_node_id is None else left_node_id,
depth=node.depth+1,
sample_index=left_sample_index,
parent_node_id=node_id,
weight=left_sample_weight,
linkage="left")
right_child_node = Node(id=self._generate_id() if right_node_id is None else right_node_id,
depth=node.depth+1,
sample_index=right_sample_index,
parent_node_id=node_id,
weight=right_sample_weight,
linkage="right")
self.nodes[left_child_node.id] = left_child_node
self.nodes[right_child_node.id] = right_child_node
node.left_node_id = left_child_node.id
node.right_node_id = right_child_node.id
return left_child_node.id, right_child_node.id
def set_weight(self, node_id: str, weight: float):
if self.check_node(node_id):
node = self.nodes[node_id]
node.weight = weight
else:
raise KeyError(f"Node_id {node_id} not valid, can't set weight.")
class BoostingTree(object):
""" For trainer with label
"""
    def __init__(self,
                 lr: Optional[List[float]] = None,
                 max_depth: Optional[List[int]] = None,
                 trees: Optional[List[Tree]] = None,
                 suggest_threshold: Optional[float] = None,
                 loss_method: str = "BCEWithLogitsLoss",
                 version: str = '1.0'):
        # Use None defaults so instances never share the same mutable lists.
        lr = [] if lr is None else lr
        max_depth = [] if max_depth is None else max_depth
        trees = [] if trees is None else trees
        if not isinstance(lr, list):
            raise TypeError(f"Parameters lr should be a list of values, not {type(lr)}.")
        if not isinstance(max_depth, list):
            raise TypeError(f"Parameters max_depth should be a list of values, not {type(max_depth)}.")
        if not isinstance(trees, list):
            raise TypeError(f"Parameters trees should be a list of Tree instances, not {type(trees)}.")
if len(lr) != len(trees):
raise ValueError(f"Length of lr {len(lr)} not equals to the length of trees {len(trees)}.")
if len(max_depth) != len(trees):
raise ValueError(f"Length of max_depth {len(max_depth)} not equals to the length of trees {len(trees)}.")
if loss_method not in ["BCEWithLogitsLoss"]:
raise NotImplementedError(f"Method {loss_method} is not implemented.")
self.trees = trees
self.lr = lr
self.max_depth = max_depth
self.suggest_threshold = suggest_threshold
self.loss_method = loss_method
self.version = version
@classmethod
def from_dict(cls, data: dict):
tree = BoostingTree(lr=data["lr"],
max_depth=data.get("max_depth", None),
trees=[Tree.from_dict(tree) for tree in data["trees"]],
suggest_threshold=data.get("suggest_threshold", None),
loss_method=data.get("loss_method", None),
version=data.get('version', '1.0'))
return tree
@classmethod
def from_proto(cls, bs: str):
xgb = XGBoostModel()
xgb.ParseFromString(bs)
d = json_format.MessageToDict(xgb,
including_default_value_fields=True,
preserving_proto_field_name=True)
return cls.from_dict(d)
def to_proto(self,
suggest_threshold: Optional[float] = None,
compute_group: bool = False):
xgb = XGBoostModel()
# logger.info("to dict")
d = self.to_dict(suggest_threshold, compute_group)
# logger.info("after to dict, to proto")
json_format.ParseDict(d, xgb)
out = xgb.SerializeToString()
# logger.info("after to proto")
return out
def to_dict(self,
suggest_threshold: Optional[float] = None,
compute_group: bool = False):
res = {}
# for binary classification
res["suggest_threshold"] = suggest_threshold or self.suggest_threshold
attribute_list = ["lr", "max_depth", "trees", "version", "loss_method"]
for name in attribute_list:
if name == "trees":
trees = [tree.to_dict() for tree in getattr(self, name)]
res[name] = trees
else:
res[name] = getattr(self, name)
res['num_trees'] = len(res['trees'])
if compute_group:
node_id_of_owner = {}
for tree in self.trees:
for node_id in tree.nodes:
split_info = tree.nodes[node_id].split_info
owner_id = split_info.owner_id if split_info else None
if owner_id is None:
continue
if owner_id not in node_id_of_owner:
node_id_of_owner[owner_id] = [node_id]
else:
node_id_of_owner[owner_id].append(node_id)
for owner_id in node_id_of_owner:
node_id_of_owner[owner_id].sort()
# For reducing transmit data at inference stage
node_id_group = {}
for _, v in node_id_of_owner.items():
# Because owner_id is unstable
node_id_group[v[0]] = {"node_id_list": v}
res["node_id_group"] = node_id_group
return res
def append(self, tree: Tree, lr: float, max_depth: int):
self.trees.append(tree)
self.lr.append(lr)
self.max_depth.append(max_depth)
def __len__(self):
return len(self.trees)
def __getitem__(self, index):
# support slice
cls = type(self)
if isinstance(index, slice):
trees = self.trees[index]
lr = self.lr[index]
max_depth = self.max_depth[index]
# Note suggest_threshold is probably not correct after slice
suggest_threshold = self.suggest_threshold
loss_method = self.loss_method
version = self.version
return cls(lr, max_depth, trees, suggest_threshold, loss_method, version)
else:
msg = "{cls.__name__} indices must be slice."
raise TypeError(msg.format(cls=cls))
class NodeDict(object):
""" For trainer without label, only store nodes
"""
def __init__(self, nodes: Dict[str, Node] = None):
self.nodes = nodes or {}
@classmethod
    def from_dict(cls, data: dict):
nodes = {id: Node.from_dict(node) for id, node in data.items()}
return NodeDict(nodes)
def to_dict(self) -> dict:
res = {id: node.to_min_dict() for id, node in self.nodes.items()}
return res # {"nodes": res}
@classmethod
def from_proto(cls, bs: str):
node = NodeModel()
node.ParseFromString(bs)
d = json_format.MessageToDict(node,
including_default_value_fields=True,
preserving_proto_field_name=True)
return cls.from_dict(d["nodes"])
def to_proto(self):
node = NodeModel()
d = {"nodes": self.to_dict()}
d = {k: v for k, v in sorted(d.items())}
json_format.ParseDict(d, node)
out = node.SerializeToString()
return out
def update(self, nodes: Dict[str, Node]):
for id, node in nodes.items():
self.nodes[id] = node
def __len__(self):
return len(self.nodes)
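# Illustrative usage sketch (an editor addition, not in the original source;
# the party and feature values below are made up for the demo):
if __name__ == "__main__":
    tree = Tree(party_id="demo_party", tree_index=0)
    info = SplitInfo(owner_id="demo_party", feature_idx=3, feature_name="x3", split_point=0.7)
    left_id, right_id = tree.split(node_id=tree.root_node_id,
                                   split_info=info,
                                   left_sample_index=[0, 1],
                                   right_sample_index=[2],
                                   left_sample_weight=-0.1,
                                   right_sample_weight=0.2)
    print(len(tree.nodes), tree.nodes[left_id].linkage)  # 3 left
    booster = BoostingTree()
    booster.append(tree, lr=0.3, max_depth=1)
    print(len(booster))  # 1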
| 15,688 | 36.089835 | 117 | py |
XFL | XFL-master/python/algorithm/core/tree/__init__.py | 0 | 0 | 0 | py | |
XFL | XFL-master/python/algorithm/core/tree/big_feature.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
import numpy as np
import pandas as pd
class Feature(object):
"""
Process column data.
"""
def __init__(self, data: pd.DataFrame, feature_columns: list):
self.data = data
self.feature_columns = feature_columns
@classmethod
def create(cls,
values: pd.DataFrame,
sample_index: Optional[np.ndarray] = None,
grad: Optional[np.ndarray] = None,
hess: Optional[np.ndarray] = None,
grad_hess: Optional[np.ndarray] = None):
values.reset_index(drop=True, inplace=True)
if sample_index is None:
sample_index = range(values.shape[0])
if grad_hess is not None:
data = pd.concat([pd.DataFrame(sample_index, columns=['xfl_id']),
pd.DataFrame(grad_hess, columns=['xfl_grad_hess']),
values], axis=1)
elif grad is not None and hess is not None:
data = pd.concat([pd.DataFrame(sample_index, columns=['xfl_id']),
pd.DataFrame(grad, columns=['xfl_grad']),
pd.DataFrame(hess, columns=['xfl_hess']), values], axis=1)
else:
raise ValueError("Grad and hess are not given.")
return Feature(data, list(values.columns))
def slice_by_sample_index(self, sample_index: np.ndarray):
df_sample_index = pd.DataFrame(sample_index)
data = pd.merge(self.data, df_sample_index, left_on='xfl_id', right_on=0)
return Feature(data, self.feature_columns)
# def slice_by_row_index(self, row_index: np.ndarray):
# data = self.data.iloc[row_index]
# return Feature(data, self.feature_columns)
| 2,413 | 37.31746 | 88 | py |
XFL | XFL-master/python/algorithm/core/tree/pack_index.py | import numpy as np
def pack_index(index_list: list[int]) -> np.ndarray:
bit_array = [0] * (max(index_list) + 1)
for i in index_list:
bit_array[i] = 1
res = np.packbits(bit_array)
return res
def unpack_index(packed_index: np.ndarray) -> list[int]:
bit_array = np.unpackbits(packed_index)
res = list(np.where(bit_array == 1)[0])
return res
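# Illustrative round-trip check (an editor addition, not in the original source):
if __name__ == "__main__":
    packed = pack_index([0, 3, 9])  # 10 bits padded to 2 bytes: 0b10010000, 0b01000000
    print(packed)                   # [144  64]
    print(unpack_index(packed))     # [0, 3, 9]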
| 378 | 22.6875 | 56 | py |
XFL | XFL-master/python/algorithm/core/tree/visualize.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from graphviz import Digraph
from .tree_structure import Tree
# def show_tree(tree: Tree):
# dot = Digraph(name="tree", comment="tree vis", format="png")
# for node_id, node in tree.nodes.items():
# if node.split_info is None:
# split_info = "%.5f" % node.weight
# else:
# if node.split_info.split_point is not None:
# # split_info = "%d:%.5f" % (node.split_info.feature_idx, node.split_info.split_point)
# split_info = "%d:%.5f:%s" % (node.split_info.feature_idx, node.split_info.split_point, node.split_info.owner_id)
# else:
# split_info = "%d:%s" % (node.split_info.feature_idx, node.split_info.owner_id)
# dot.node(name=node_id,
# label=split_info)
# # label="" if node.weight is None else str(node.weight))
# # label="" if node.sample_index is None else str(len(node.sample_index))) # + "," + "" if node.weight is None else str(node.weight))
# # label = "split_point:" + "" if node.split_info is None else str(node.split_info.split_point) + \
# # "num_samples:" + "" if node.sample_index is None else str(len(node.sample_index)))
# for node_id, node in tree.nodes.items():
# if node.left_node_id is not None:
# dot.edge(node_id, node.left_node_id)
# dot.edge(node_id, node.right_node_id)
# dot.view(filename="tree_structure", directory='.')
| 2,126 | 45.23913 | 151 | py |
XFL | XFL-master/python/algorithm/core/loss/torch_loss.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import torch.nn as torch_nn
from torch.nn import Module
import torch
from common.xregister import xregister
def get_lossfunc(name: str):
if name in dir(torch_nn):
lossfunc = getattr(torch_nn, name)
elif name in dir(sys.modules[__name__]):
lossfunc = getattr(sys.modules[__name__], name)
elif name in xregister.registered_object:
lossfunc = xregister(name)
else:
raise ValueError(f"Loss function {name} is not supported in torch.")
return lossfunc
class MapeLoss(Module):
def __init__(self):
super(MapeLoss, self).__init__()
def forward(self, preds: torch.Tensor, labels: torch.Tensor) -> torch.Tensor:
mask = (labels != 0)
distance = torch.abs(preds - labels) / torch.abs(labels)
return torch.mean(distance[mask])
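# Illustrative usage sketch (an editor addition, not in the original source):
# MapeLoss averages |pred - label| / |label| over the rows whose label is non-zero.
if __name__ == "__main__":
    preds = torch.tensor([110.0, 90.0, 1.0])
    labels = torch.tensor([100.0, 100.0, 0.0])  # the zero-label row is masked out
    print(round(MapeLoss()(preds, labels).item(), 4))  # 0.1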
| 1,428 | 32.232558 | 81 | py |
XFL | XFL-master/python/algorithm/core/loss/jax_loss.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import optax
from common.xregister import xregister
def get_lossfunc(name: str):
if name in dir(optax):
loss_func = getattr(optax, name)
elif name in dir(sys.modules[__name__]):
loss_func = getattr(sys.modules[__name__], name)
elif name in xregister.registered_object:
loss_func = xregister(name)
else:
raise ValueError(f"Loss function {name} is not supported in jax.")
return loss_func
| 1,062 | 33.290323 | 74 | py |
XFL | XFL-master/python/algorithm/core/loss/tf_loss.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import tensorflow.keras.losses as tf_loss
from common.xregister import xregister
def get_lossfunc(name: str):
if name in dir(tf_loss):
loss_func = getattr(tf_loss, name)
elif name in dir(sys.modules[__name__]):
loss_func = getattr(sys.modules[__name__], name)
elif name in xregister.registered_object:
loss_func = xregister(name)
else:
raise ValueError(f"Loss function {name} is not supported in tensorflow.")
return loss_func
| 1,098 | 34.451613 | 81 | py |
XFL | XFL-master/python/algorithm/core/loss/paddle_loss.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import paddle.nn as paddle_nn
from common.xregister import xregister
def get_lossfunc(name: str):
if name in dir(paddle_nn):
loss_func = getattr(paddle_nn, name)
elif name in dir(sys.modules[__name__]):
loss_func = getattr(sys.modules[__name__], name)
elif name in xregister.registered_object:
loss_func = xregister(name)
else:
raise ValueError(f"Loss function {name} is not supported in paddlepaddle.")
return loss_func
| 1,096 | 34.387097 | 83 | py |
XFL | XFL-master/python/algorithm/core/optimizer/paddle_optimizer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import paddle.optimizer as pd_optim
from common.xregister import xregister
def get_optimizer(name: str):
optim = None
if name in dir(pd_optim):
optim = getattr(pd_optim, name)
elif name in dir(sys.modules[__name__]):
optim = getattr(sys.modules[__name__], name)
elif name in xregister.registered_object:
optim = xregister(name)
else:
raise ValueError(f"Optimizer {name} is not supported in torch.")
return optim
| 1,087 | 33 | 74 | py |
XFL | XFL-master/python/algorithm/core/optimizer/torch_optimizer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import torch.optim as torch_optim
from common.xregister import xregister
def get_optimizer(name: str):
optim = None
if name in dir(torch_optim):
optim = getattr(torch_optim, name)
elif name in dir(sys.modules[__name__]):
optim = getattr(sys.modules[__name__], name)
elif name in xregister.registered_object:
optim = xregister(name)
else:
raise ValueError(f"Optimizer {name} is not supported in torch.")
return optim
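# Illustrative usage sketch (an editor addition, not in the original source):
if __name__ == "__main__":
    import torch
    adam_cls = get_optimizer("Adam")  # resolves to torch.optim.Adam
    optimizer = adam_cls([torch.nn.Parameter(torch.zeros(2, 2))], lr=1e-3)
    print(type(optimizer).__name__)   # Adam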
| 1,091 | 33.125 | 74 | py |
XFL | XFL-master/python/algorithm/core/optimizer/jax_optimizer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import optax
from common.xregister import xregister
def get_optimizer(name: str):
optim = None
if name in dir(optax):
optim = getattr(optax, name)
elif name in dir(sys.modules[__name__]):
optim = getattr(sys.modules[__name__], name)
elif name in xregister.registered_object:
optim = xregister(name)
else:
raise ValueError(f"Optimizer {name} is not supported in jax.")
return optim
| 1,056 | 32.03125 | 74 | py |
XFL | XFL-master/python/algorithm/core/optimizer/tf_optimizer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import tensorflow.keras.optimizers as tf_optim
from common.xregister import xregister
def get_optimizer(name: str):
optim = None
if name in dir(tf_optim):
optim = getattr(tf_optim, name)
elif name in dir(sys.modules[__name__]):
optim = getattr(sys.modules[__name__], name)
elif name in xregister.registered_object:
optim = xregister(name)
else:
raise ValueError(f"Optimizer {name} is not supported in tensorflow.")
return optim
| 1,104 | 32.484848 | 77 | py |
XFL | XFL-master/python/algorithm/config/__init__.py | 0 | 0 | 0 | py | |
XFL | XFL-master/python/algorithm/config/vertical_sampler/__init__.py | 0 | 0 | 0 | py | |
XFL | XFL-master/python/algorithm/config/horizontal_xgboost/__init__.py | 0 | 0 | 0 | py | |
XFL | XFL-master/python/algorithm/config/vertical_binning_woe_iv/__init__.py | 0 | 0 | 0 | py | |
XFL | XFL-master/python/algorithm/config/vertical_poisson_regression/__init__.py | 0 | 0 | 0 | py | |
XFL | XFL-master/python/algorithm/config/horizontal_bert/__init__.py | 0 | 0 | 0 | py | |
XFL | XFL-master/python/algorithm/config/local_data_statistic/__init__.py | 0 | 0 | 0 | py | |
XFL | XFL-master/python/algorithm/config/vertical_kmeans/__init__.py | 0 | 0 | 0 | py | |
XFL | XFL-master/python/algorithm/config/transfer_logistic_regression/__init__.py | 0 | 0 | 0 | py | |
XFL | XFL-master/python/algorithm/config/horizontal_nbafl/__init__.py | 0 | 0 | 0 | py | |
XFL | XFL-master/python/algorithm/config/horizontal_kmeans/__init__.py | 0 | 0 | 0 | py | |
XFL | XFL-master/python/algorithm/config/horizontal_linear_regression/__init__.py | 0 | 0 | 0 | py | |
XFL | XFL-master/python/algorithm/config/horizontal_resnet/__init__.py | 0 | 0 | 0 | py | |
XFL | XFL-master/python/algorithm/config/horizontal_logistic_regression/__init__.py | 0 | 0 | 0 | py | |
XFL | XFL-master/python/algorithm/config/local_standard_scaler/__init__.py | 0 | 0 | 0 | py | |
XFL | XFL-master/python/algorithm/config/vertical_pearson/__init__.py | 0 | 0 | 0 | py | |
XFL | XFL-master/python/algorithm/config/horizontal_poisson_regression/__init__.py | 0 | 0 | 0 | py | |
XFL | XFL-master/python/algorithm/config/horizontal_densenet/__init__.py | 0 | 0 | 0 | py | |
XFL | XFL-master/python/algorithm/config/vertical_linear_regression/__init__.py | 0 | 0 | 0 | py | |
XFL | XFL-master/python/algorithm/config/local_data_split/__init__.py | 0 | 0 | 0 | py | |
XFL | XFL-master/python/algorithm/config/horizontal_vgg/__init__.py | 0 | 0 | 0 | py | |
XFL | XFL-master/python/algorithm/config/horizontal_vgg_jax/__init__.py | 0 | 0 | 0 | py | |
XFL | XFL-master/python/algorithm/config/local_normalization/__init__.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 601 | 42 | 74 | py |
XFL | XFL-master/python/algorithm/config/vertical_feature_selection/__init__.py | 0 | 0 | 0 | py | |
XFL | XFL-master/python/algorithm/config/horizontal_gcn_mol/__init__.py | 0 | 0 | 0 | py | |
XFL | XFL-master/python/algorithm/config/vertical_xgboost/__init__.py | 0 | 0 | 0 | py | |
XFL | XFL-master/python/algorithm/config/vertical_logistic_regression/__init__.py | 0 | 0 | 0 | py | |
XFL | XFL-master/python/algorithm/config/local_feature_preprocess/__init__.py | 0 | 0 | 0 | py | |
XFL | XFL-master/python/algorithm/config/horizontal_binning_woe_iv/__init__.py | 0 | 0 | 0 | py | |
XFL | XFL-master/python/algorithm/config/horizontal_resnet_paddle/__init__.py | 0 | 0 | 0 | py | |
XFL | XFL-master/python/algorithm/model/bert_torch.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from transformers import BertConfig, BertForSequenceClassification
import torch.nn as nn
class BertForSst2Torch(nn.Module):
def __init__(self, from_pretrained=True, num_labels=None, **kwargs):
super().__init__()
if from_pretrained:
config = BertConfig.from_pretrained("bert-base-uncased", num_labels=num_labels)
self.bert = BertForSequenceClassification.from_pretrained('bert-base-uncased', config=config)
else:
config = BertConfig(num_labels=num_labels, **kwargs)
self.bert = BertForSequenceClassification(config=config)
self.softmax = nn.Softmax(dim=-1)
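    # forward returns (loss, logits, prob); BertForSequenceClassification
    # computes the classification loss internally when `labels` is provided.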
def forward(self, input_ids, attention_mask, token_type_ids, labels):
        loss, logits = self.bert(input_ids=input_ids, attention_mask=attention_mask,
                                 token_type_ids=token_type_ids, labels=labels)[:2]
prob = self.softmax(logits)
return loss, logits, prob | 1,564 | 43.714286 | 105 | py |
XFL | XFL-master/python/algorithm/model/bert.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from transformers import BertConfig, BertForSequenceClassification
import torch.nn as nn
class BertForSst2Torch(nn.Module):
def __init__(self, from_pretrained=True, num_labels=None, **kwargs):
super().__init__()
if from_pretrained:
config = BertConfig.from_pretrained("bert-base-uncased", num_labels=num_labels)
self.bert = BertForSequenceClassification.from_pretrained('bert-base-uncased', config=config)
else:
config = BertConfig(num_labels=num_labels, **kwargs)
self.bert = BertForSequenceClassification(config=config)
self.softmax = nn.Softmax(dim=-1)
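    # forward returns (loss, logits, prob); BertForSequenceClassification
    # computes the classification loss internally when `labels` is provided.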
def forward(self, input_ids, attention_mask, token_type_ids, labels):
        loss, logits = self.bert(input_ids=input_ids, attention_mask=attention_mask,
                                 token_type_ids=token_type_ids, labels=labels)[:2]
prob = self.softmax(logits)
return loss, logits, prob | 1,564 | 43.714286 | 105 | py |
XFL | XFL-master/python/algorithm/model/horizontal_k_means.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
class HorizontalKMeans(nn.Module):
def __init__(self, input_dim, num_clusters) -> None:
super(HorizontalKMeans, self).__init__()
self.centroids = nn.Parameter(
torch.rand(num_clusters, input_dim)
)
def forward(self, x):
return x
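# Example (a sketch; the cluster-assignment step is assumed to live in the trainer):
#   model = HorizontalKMeans(input_dim=4, num_clusters=3)
#   distances = torch.cdist(x, model.centroids)  # shape: (n_samples, num_clusters)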
| 922 | 30.827586 | 74 | py |
XFL | XFL-master/python/algorithm/model/resnet.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This model contains a PyTorch implementation of the paper "Deep Residual Learning for Image Recognition."[1]
# [1]He, K., Zhang, X., Ren, S., & Sun, J. (2016). Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition (pp. 770-778).
from collections import OrderedDict
import torch.nn as nn
class ConvBlock(nn.Module):
expansion = 4
def __init__(self, in_channels, out_channels, downsample=None, stride=1):
super().__init__()
self.stem = nn.Sequential(OrderedDict([
("conv1", nn.Conv2d(in_channels, out_channels,
kernel_size=1, stride=1, padding=0)),
("batch_norm1", nn.BatchNorm2d(out_channels, track_running_stats=True)), # setting track_running_stats as False
("relu1", nn.ReLU()),
("conv2", nn.Conv2d(out_channels, out_channels,
kernel_size=3, stride=stride, padding=1)),
("batch_norm2", nn.BatchNorm2d(out_channels,track_running_stats=True)),
("relu2", nn.ReLU()),
("conv3", nn.Conv2d(out_channels, out_channels *
self.expansion, kernel_size=1, stride=1, padding=0)),
("batch_norm3", nn.BatchNorm2d(out_channels*self.expansion, track_running_stats=True))
]))
self.downsample = downsample
self.stride = stride
self.relu = nn.ReLU()
def forward(self, x):
residual = x
x = self.stem(x)
if self.downsample is not None:
residual = self.downsample(residual)
x += residual
x = self.relu(x)
return x
class Resnet(nn.Module):
def __init__(self, ResBlock, block_list, num_classes):
super().__init__()
self.stem = nn.Sequential(OrderedDict([
("conv1", nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)),
("batch_norm1", nn.BatchNorm2d(64, track_running_stats=True)),
("relu", nn.ReLU())
]))
self.max_pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layers1 = self._make_layers(
ResBlock, block_list[0], inplanes=64, outplanes=64, stride=1)
self.layers2 = self._make_layers(
ResBlock, block_list[1], inplanes=256, outplanes=128, stride=2)
self.layers3 = self._make_layers(
ResBlock, block_list[2], inplanes=512, outplanes=256, stride=2)
self.layers4 = self._make_layers(
ResBlock, block_list[3], inplanes=1024, outplanes=512, stride=2)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512*ResBlock.expansion, num_classes)
def forward(self, x):
x = self.stem(x)
x = self.max_pool(x)
x = self.layers1(x)
x = self.layers2(x)
x = self.layers3(x)
x = self.layers4(x)
x = self.avgpool(x)
x = x.reshape(x.shape[0], -1)
x = self.fc(x)
return x
def _make_layers(self, ResBlock, blocks, inplanes, outplanes, stride=1):
        layers = []
downsample = None
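        # A 1x1 projection shortcut is needed whenever the block changes the
        # spatial resolution (stride != 1) or the channel count, so the
        # residual can be added element-wise to the block output.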
if stride != 1 or inplanes != outplanes*ResBlock.expansion:
downsample = nn.Sequential(
nn.Conv2d(inplanes, outplanes*ResBlock.expansion,
kernel_size=1, stride=stride),
nn.BatchNorm2d(outplanes*ResBlock.expansion, track_running_stats=True)
)
layers.append(ResBlock(inplanes, outplanes,
downsample=downsample, stride=stride))
        for _ in range(1, blocks):
layers.append(ResBlock(outplanes*ResBlock.expansion, outplanes))
return nn.Sequential(*layers)
def ResNet(num_classes, layers):
if layers == 18:
return Resnet(ConvBlock, [2, 2, 2, 2], num_classes)
    elif layers == 50:
return Resnet(ConvBlock, [3, 4, 6, 3], num_classes)
elif layers == 101:
return Resnet(ConvBlock, [3, 4, 23, 3], num_classes)
elif layers == 152:
return Resnet(ConvBlock, [3, 8, 36, 3], num_classes)
    elif layers == 'unit_test':
        return Resnet(ConvBlock, [2, 2, 2, 2], num_classes)
    else:
        raise NotImplementedError("Only support ResNet18, ResNet50, ResNet101, ResNet152 currently, please change layers")
# if __name__ == "__main__":
# import torch
# from thop import profile, clever_format
# input = torch.randn(1, 3, 224, 224)
# model = ResNet(10,50)
# macs, params = profile(model, inputs=(input, ))
# macs, params = clever_format([macs, params], "%.3f")
# print(macs, params) | 5,173 | 37.902256 | 192 | py |
XFL | XFL-master/python/algorithm/model/vgg_jax.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This model contains a PyTorch implementation of the paper "Very Deep Convolutional Networks for Large-Scale Image Recognition."[1]
# [1]Simonyan, K., & Zisserman, A. (2014). Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556.
import jax.numpy as jnp
import flax.linen as nn
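# Each config entry is either a conv output width or 'max' for a 2x2 max-pool.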
layers_cfg = {
'VGG11': [64, 'max', 128, 'max', 256, 256, 'max', 512, 512, 'max', 512, 512, 'max'],
'VGG13': [64, 64, 'max', 128, 128, 'max', 256, 256, 'max', 512, 512, 'max', 512, 512, 'max'],
'VGG16': [64, 64, 'max', 128, 128, 'max', 256, 256, 256, 'max', 512, 512, 512, 'max', 512, 512, 512, 'max'],
'VGG19': [64, 64, 'max', 128, 128, 'max', 256, 256, 256, 256, 'max', 512, 512, 512, 512, 'max', 512, 512, 512, 512, 'max'],
'unit_test': [64, 'max', 128, 'max', 256, 'max', 512, 'max']
}
class VggJax(nn.Module):
vgg_name: str
num_classes: int
@nn.compact
def __call__(self, x, train=True):
def adaptive_avg_pool(x):
return nn.avg_pool(x, window_shape=(x.shape[1], x.shape[2]), strides=(1,1))
def seq_max_pool(x):
return nn.max_pool(x, window_shape=(2, 2), strides=(2, 2), padding='VALID')
layers = []
for outplanes in layers_cfg[self.vgg_name]:
if outplanes == 'max':
layers.append(seq_max_pool)
else:
layers.extend([
nn.Conv(features=outplanes, kernel_size=(3, 3), padding=(1, 1)),
nn.BatchNorm(use_running_average=not train, momentum=0.9, epsilon=1e-5, dtype=jnp.float32),
nn.relu
])
layers.append(adaptive_avg_pool)
model = nn.Sequential(layers)
fc = nn.Dense(self.num_classes)
x = model(x)
x = x.reshape((x.shape[0], -1))
x = fc(x)
return x
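# Example usage (a sketch; flax expects NHWC inputs and explicit RNGs):
#   import jax
#   model = vggjax(num_classes=10, layers=16)
#   variables = model.init(jax.random.PRNGKey(0), jnp.ones((1, 32, 32, 3)), train=False)
#   logits = model.apply(variables, jnp.ones((1, 32, 32, 3)), train=False)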
def vggjax(num_classes, layers):
if layers == 11:
return VggJax("VGG11", num_classes)
elif layers == 13:
return VggJax("VGG13", num_classes)
elif layers == 16:
return VggJax("VGG16", num_classes)
elif layers == 19:
return VggJax("VGG19", num_classes)
elif layers == "unit_test":
return VggJax("unit_test", num_classes)
else:
raise NotImplementedError("Only support VGG11, VGG13, VGG16, VGG19 currently, please change layers") | 3,012 | 38.12987 | 142 | py |
XFL | XFL-master/python/algorithm/model/vgg.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This model contains a PyTorch implementation of the paper "Very Deep Convolutional Networks for Large-Scale Image Recognition."[1]
# [1]Simonyan, K., & Zisserman, A. (2014). Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556.
import torch
import torch.nn as nn
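# Each config entry is either a conv output width or 'max' for a 2x2 max-pool.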
layers_cfg = {
'VGG11': [64, 'max', 128, 'max', 256, 256, 'max', 512, 512, 'max', 512, 512, 'max'],
'VGG13': [64, 64, 'max', 128, 128, 'max', 256, 256, 'max', 512, 512, 'max', 512, 512, 'max'],
'VGG16': [64, 64, 'max', 128, 128, 'max', 256, 256, 256, 'max', 512, 512, 512, 'max', 512, 512, 512, 'max'],
'VGG19': [64, 64, 'max', 128, 128, 'max', 256, 256, 256, 256, 'max', 512, 512, 512, 512, 'max', 512, 512, 512, 512, 'max'],
'unit_test': [64, 'max', 128, 'max', 256, 'max', 512, 'max']
}
class Vgg(nn.Module):
def __init__(self, vgg_name, num_classes):
super().__init__()
self.stem = self._make_layers(layers_cfg[vgg_name])
self.fc = nn.Linear(512, num_classes)
def forward(self, x):
x = self.stem(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def _make_layers(self, layers_cfg):
layers = []
in_planes = 3
for outplanes in layers_cfg:
if outplanes == 'max':
layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
else:
layers.extend([nn.Conv2d(in_planes, outplanes, kernel_size=3, padding=1),
nn.BatchNorm2d(outplanes),
nn.ReLU(inplace=True)])
in_planes = outplanes
layers.append(nn.AdaptiveAvgPool2d((1, 1)))
return nn.Sequential(*layers)
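# Example usage (a sketch; assumes CIFAR-sized 3x32x32 inputs):
#   model = VGG(num_classes=10, layers=16)
#   logits = model(torch.randn(1, 3, 32, 32))  # -> shape (1, 10)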
def VGG(num_classes, layers):
if layers == 11:
return Vgg("VGG11", num_classes)
elif layers == 13:
return Vgg("VGG13", num_classes)
elif layers == 16:
return Vgg("VGG16", num_classes)
elif layers == 19:
return Vgg("VGG19", num_classes)
elif layers == "unit_test":
return Vgg("unit_test", num_classes)
else:
raise NotImplementedError("Only support VGG11, VGG13, VGG16, VGG!9 currently, please change layers") | 2,806 | 39.1 | 142 | py |