repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
XFL | XFL-master/python/common/communication/gRPC/python/commu_pb2_grpc.py | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
| 159 | 31 | 75 | py |
XFL | XFL-master/python/common/communication/gRPC/python/channel.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
from typing import Any, List, Union
from common.utils.logger import logger
from service.fed_config import FedConfig
from .commu import Commu
PARALLEL = True
# Note: now only dual_channel support wait option.
# Important: if wait option is on, one should not send None object.
class FullConnectedChannel(object):
    """Base channel connecting every node in ``ids`` with every other node.

    Messages travel through :class:`Commu` under keys of the form
    ``job_id~channel_name~offset~tag~src->dst`` so that successive
    transmission rounds on the same channel never collide.
    """

    def __init__(self, name: str, ids: list, job_id: Union[str, int] = 0, auto_offset: bool = True):
        """
        Args:
            name (str): channel name, embedded in every message key.
            ids (list): ids of all member nodes. Must include the local node id,
                and every id must be a known trainer id or the scheduler id.
            job_id (Union[str, int], optional): job id embedded in message keys.
                Defaults to 0.
            auto_offset (bool, optional): if True, a per-direction counter is
                appended to message keys and advanced automatically on each
                transmission round. Defaults to True.

        Raises:
            ValueError: if the local node is not in ``ids``, if ``ids`` contains
                an unknown node, or if the channel has only one member.
        """
        self.name = name
        self.ids = ids
        self.job_id = str(job_id)
        # Separate locks so one send and one recv may proceed concurrently
        # while each direction's offset bookkeeping stays consistent.
        self.send_lock = threading.Lock()
        self.recv_lock = threading.Lock()
        if Commu.node_id not in ids:
            raise ValueError(f"Local node id {Commu.node_id} is not in input ids {ids}.")
        if len([i for i in ids if i in Commu.trainer_ids + [Commu.scheduler_id]]) != len(ids):
            raise ValueError(f"Input ids {ids} are illegal, must be in {Commu.trainer_ids + [Commu.scheduler_id]}.")
        if len(ids) == 1:
            raise ValueError("The created channel has only one node.")
        self.auto_offset = auto_offset
        # Independent counters for outgoing and incoming transmissions.
        self._send_offset = 0
        self._recv_offset = 0

    def _gen_send_key(self, remote_id: str, tag: str, accumulate_offset: bool) -> str:
        """Build the key for an outgoing message; optionally advance the send offset."""
        # job_id -> channel_name -> offset -> tag -> start_end_id
        send_key = '~'.join([self.job_id, self.name, str(self._send_offset), tag, Commu.node_id + '->' + remote_id])
        if self.auto_offset and accumulate_offset:
            self._send_offset += 1
        return send_key

    def _gen_recv_key(self, remote_id: str, tag: str, accumulate_offset: bool) -> str:
        """Build the key for an incoming message; optionally advance the recv offset."""
        # job_id -> channel_name -> offset -> tag -> start_end_id
        recv_key = '~'.join([self.job_id, self.name, str(self._recv_offset), tag, remote_id + '->' + Commu.node_id])
        if self.auto_offset and accumulate_offset:
            self._recv_offset += 1
        return recv_key

    def _send(self, remote_id: str, value: Any, tag: str = '@', accumulate_offset: bool = True,
              use_pickle: bool = True) -> int:
        """Send ``value`` to ``remote_id``; returns the remote response status code (0 = success)."""
        key = self._gen_send_key(remote_id, tag, accumulate_offset)
        logger.debug(f"Send {key} to {remote_id}")
        status = Commu.send(key, value, remote_id, use_pickle)
        logger.debug(f"Send {key} successfully!")
        return status

    def _recv(self, remote_id: str, tag: str = '@', accumulate_offset: bool = True, use_pickle: bool = True, wait: bool = True, default_value: Any = None) -> Any:
        """Receive one message from ``remote_id``.

        When ``wait`` is False and nothing has arrived, ``default_value`` is
        returned and the recv offset is rolled back so the next poll reuses
        the same key. Arrival is detected by comparing against
        ``default_value``, hence the module-level warning that a value equal
        to ``default_value`` must not be sent when ``wait`` is off.
        """
        key = self._gen_recv_key(remote_id, tag, accumulate_offset)
        if wait:
            logger.debug(f"Get {key}")
        data = Commu.recv(key, use_pickle, wait, default_value)
        if wait:
            logger.debug(f"Get {key} successfully!")
        else:
            # if data is not None:
            if data != default_value:
                logger.debug(f"Get {key}")
                logger.debug(f"Get {key} successfully!")
            else:
                # Nothing arrived: undo the offset advance performed inside
                # _gen_recv_key so that the next attempt reuses this key.
                if self.auto_offset and accumulate_offset:
                    self._recv_offset -= 1
        return data

    def _swap(self, remote_id: str, value: Any, tag: str = '@', use_pickle: bool = True) -> Any:
        """Send ``value`` to ``remote_id`` and return the value received back.

        Raises:
            ValueError: if the send is acknowledged with a non-zero status.
        """
        with self.send_lock:
            status = self._send(remote_id, value, tag, True, use_pickle)
        if status != 0:
            raise ValueError(f"Receive response status {status} when send to remote id {remote_id}")
        with self.recv_lock:
            data = self._recv(remote_id, tag, True, use_pickle)
        return data

    def _broadcast(self, remote_ids: List[str], value: Any, tag: str = '@', use_pickle: bool = True) -> int:
        """Send the same ``value`` to every node in ``remote_ids``.

        All per-peer sends share one offset: each underlying ``_send`` runs
        with ``accumulate_offset=False`` and the send offset is advanced once
        at the end.

        Raises:
            ConnectionError: if any send returns a non-zero status code.
        """
        br_status = 0
        if PARALLEL:
            thread_list = []
            result_list = [None for id in remote_ids]

            def func(i, *args):
                # Each thread writes only its own slot, so no lock is needed.
                result_list[i] = self._send(*args)

            for i, id in enumerate(remote_ids):
                task = threading.Thread(target=func, args=(i, id, value, tag, False, use_pickle))
                thread_list.append(task)
            for task in thread_list:
                task.start()
            for task in thread_list:
                task.join()
            for i, status in enumerate(result_list):
                if status != 0:
                    br_status = status
                    raise ConnectionError(f"Message send to id {remote_ids[i]} not successful, response code {status}")
        else:
            for id in remote_ids:
                status = self._send(id, value, tag, False, use_pickle)
                if status != 0:
                    br_status = status
                    raise ConnectionError(f"Message send to id {id} not successful, response code {status}")
        # One collective call counts as a single transmission round.
        self._send_offset += 1
        return br_status

    def _scatter(self, remote_ids: List[str], values: List[Any], tag: str = '@', use_pickle: bool = True) -> int:
        """Send ``values[i]`` to ``remote_ids[i]`` for every i.

        Like ``_broadcast``, the whole scatter advances the send offset once.

        Raises:
            ConnectionError: if any send returns a non-zero status code.
        """
        sc_status = 0
        if PARALLEL:
            thread_list = []
            result_list = [None for id in remote_ids]

            def func(i, *args):
                # Each thread writes only its own slot, so no lock is needed.
                result_list[i] = self._send(*args)

            for i, id in enumerate(remote_ids):
                task = threading.Thread(target=func, args=(i, id, values[i], tag, False, use_pickle))
                thread_list.append(task)
            for task in thread_list:
                task.start()
            for task in thread_list:
                task.join()
            for i, status in enumerate(result_list):
                if status != 0:
                    sc_status = status
                    raise ConnectionError(f"Message send to id {remote_ids[i]} not successful, response code {status}")
        else:
            for i, id in enumerate(remote_ids):
                status = self._send(id, values[i], tag, False, use_pickle)
                if status != 0:
                    sc_status = status
                    raise ConnectionError(f"Message send to id {id} not successful, response code {status}")
        # One collective call counts as a single transmission round.
        self._send_offset += 1
        return sc_status

    def _collect(self, remote_ids: List[str], tag: str = '@', use_pickle: bool = True) -> List[Any]:
        """Receive one blocking message from each node in ``remote_ids``.

        Returns:
            List[Any]: received payloads, ordered like ``remote_ids``. The
            whole collect advances the recv offset once.
        """
        data = [None for i in range(len(remote_ids))]
        if PARALLEL:
            thread_list = []

            def func(i, *args):
                # Each thread writes only its own slot, so no lock is needed.
                data[i] = self._recv(*args)

            for i, id in enumerate(remote_ids):
                task = threading.Thread(target=func, args=(i, id, tag, False, use_pickle))
                thread_list.append(task)
            for task in thread_list:
                task.start()
            for task in thread_list:
                task.join()
        else:
            for i, id in enumerate(remote_ids):
                data[i] = self._recv(id, tag, False, use_pickle)
        # One collective call counts as a single transmission round.
        self._recv_offset += 1
        return data
class DualChannel(FullConnectedChannel):
    def __init__(self, name: str, ids: list, job_id: Union[str, int] = "", auto_offset: bool = True):
        """A point-to-point channel between exactly two parties.

        Args:
            name (str): channel name.
            ids (list): ids of the two participating parties.
            job_id (Union[str, int], optional): job id of the federation when
                creating the channel. When left as "", it is obtained from the
                XFL framework automatically. Defaults to "".
            auto_offset (bool, optional): whether the transmission count is
                accumulated automatically. When False, the tag must be set
                manually and must never repeat across two communication
                rounds. Defaults to True.
        """
        effective_job_id = Commu.get_job_id() if job_id == "" else job_id
        super().__init__(name, ids, job_id=effective_job_id, auto_offset=auto_offset)
        # The peer is whichever member of ids is not the local node.
        self.remote_id = next(iter(set(ids) - {Commu.node_id}))

    def send(self, value: Any, tag: str = '@', use_pickle: bool = True) -> int:
        """Send ``value`` to the peer; returns the peer's response status code."""
        with self.send_lock:
            return self._send(self.remote_id, value, tag, True, use_pickle)

    def recv(self, tag: str = '@', use_pickle: bool = True, wait: bool = True, default_value: Any = None) -> Any:
        """Receive one message from the peer (``default_value`` on a non-blocking miss)."""
        with self.recv_lock:
            return self._recv(self.remote_id, tag, True, use_pickle, wait, default_value)

    def swap(self, value: Any, tag: str = '@', use_pickle: bool = True) -> Any:
        """Exchange values with the peer: send ``value``, return what the peer sent."""
        return self._swap(self.remote_id, value, tag, use_pickle)
class BroadcastChannel(FullConnectedChannel):
    """A star-topology channel: one root node and several leaf nodes.

    The root calls ``broadcast``/``scatter``/``collect``; every other node
    talks to the root via ``send``/``recv``.
    """

    def __init__(self, name: str, ids: List[str] = [], root_id: str = '', job_id: Union[str, int] = "",
                 auto_offset: bool = True):
        """
        Args:
            name (str): channel name.
            ids (List[str], optional): ids of all member nodes. When empty, the
                label trainers plus trainers from FedConfig are used. Defaults to [].
            root_id (str, optional): id of the root node. When empty, the first
                label trainer (if any) is used. Defaults to ''.
            job_id (Union[str, int], optional): job id of the federation; when
                "", it is obtained from the XFL framework. Defaults to "".
            auto_offset (bool, optional): whether the transmission count is
                accumulated automatically. Defaults to True.
        """
        # Defensive copy: the previous implementation did `ids += [root_id]`,
        # which mutated the caller's list — and, when the default was used,
        # the shared [] default argument — in place.
        ids = list(ids)
        if not root_id:
            label_trainer_list = FedConfig.get_label_trainer()
            root_id = label_trainer_list[0] if label_trainer_list else None
        if not ids:
            # ids = Commu.trainer_ids
            ids = FedConfig.get_label_trainer() + FedConfig.get_trainer()
        if root_id not in ids:
            ids = ids + [root_id]
        if job_id == "":
            job_id = Commu.get_job_id()
        super().__init__(name, ids, job_id=job_id, auto_offset=auto_offset)
        self.root_id = root_id
        # NOTE(review): set difference makes the order of remote_ids depend on
        # string hashing; confirm all parties derive the same order before
        # relying on scatter/collect index alignment across nodes.
        self.remote_ids = list(set(ids) - {root_id})

    # for root id
    def broadcast(self, value: Any, tag: str = '@', use_pickle: bool = True) -> int:
        """Send the same ``value`` to every non-root node; returns 0 on success."""
        with self.send_lock:
            status = self._broadcast(self.remote_ids, value, tag, use_pickle)
        return status

    def scatter(self, values: List[Any], tag: str = '@', use_pickle: bool = True) -> int:
        """Send ``values[i]`` to the i-th node of ``remote_ids``; returns 0 on success."""
        with self.send_lock:
            status = self._scatter(self.remote_ids, values, tag, use_pickle)
        return status

    def collect(self, tag: str = '@', use_pickle: bool = True) -> List[Any]:
        """Receive one message from every non-root node, ordered like ``remote_ids``."""
        with self.recv_lock:
            status = self._collect(self.remote_ids, tag, use_pickle)
        return status

    # for remote ids
    def send(self, value: Any, tag: str = '@', use_pickle: bool = True) -> int:
        """Send ``value`` to the root node; returns the root's response status code."""
        with self.send_lock:
            status = self._send(self.root_id, value, tag, True, use_pickle)
        return status

    def recv(self, tag: str = '@', use_pickle: bool = True, wait: bool = True, default_value: Any = None) -> Any:
        """Receive one message from the root (``default_value`` on a non-blocking miss)."""
        with self.recv_lock:
            status = self._recv(self.root_id, tag, True, use_pickle, wait, default_value)
        return status
| 11,495 | 39.911032 | 162 | py |
XFL | XFL-master/python/common/communication/gRPC/python/scheduler_pb2.py | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: scheduler.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
import checker_pb2 as checker__pb2
import commu_pb2 as commu__pb2
import status_pb2 as status__pb2
import control_pb2 as control__pb2
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x0fscheduler.proto\x12\tscheduler\x1a\rchecker.proto\x1a\x0b\x63ommu.proto\x1a\x0cstatus.proto\x1a\rcontrol.proto\"3\n\x10GetConfigRequest\x12\x0e\n\x06nodeId\x18\x01 \x01(\t\x12\x0f\n\x07message\x18\x04 \x01(\t\"Q\n\x11GetConfigResponse\x12\r\n\x05jobId\x18\x01 \x01(\x05\x12\x0e\n\x06\x63onfig\x18\x02 \x01(\t\x12\x0c\n\x04\x63ode\x18\x03 \x01(\x05\x12\x0f\n\x07message\x18\x04 \x01(\t\"t\n\rDefaultConfig\x12\x34\n\x06\x63onfig\x18\x01 \x03(\x0b\x32$.scheduler.DefaultConfig.ConfigEntry\x1a-\n\x0b\x43onfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\x19\n\x17GetAlgorithmListRequest\"\xe7\x01\n\x18GetAlgorithmListResponse\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x15\n\ralgorithmList\x18\x02 \x03(\t\x12S\n\x10\x64\x65\x66\x61ultConfigMap\x18\x03 \x03(\x0b\x32\x39.scheduler.GetAlgorithmListResponse.DefaultConfigMapEntry\x1aQ\n\x15\x44\x65\x66\x61ultConfigMapEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\'\n\x05value\x18\x02 \x01(\x0b\x32\x18.scheduler.DefaultConfig:\x02\x38\x01\"7\n\x12RecProgressRequest\x12\x0f\n\x07stageId\x18\x01 \x01(\x05\x12\x10\n\x08progress\x18\x02 \x01(\x05\"#\n\x13RecProgressResponse\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\" \n\x0fGetStageRequest\x12\r\n\x05jobId\x18\x01 \x01(\x05\"\xa9\x01\n\x10GetStageResponse\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x16\n\x0e\x63urrentStageId\x18\x02 \x01(\x05\x12\x15\n\rtotalStageNum\x18\x03 \x01(\x05\x12\x18\n\x10\x63urrentStageName\x18\x04 \x01(\t\x12\x11\n\tisRunning\x18\x05 \x01(\x08\x12+\n\x0bprogressBar\x18\x06 \x03(\x0b\x32\x16.scheduler.ProgressBar\"5\n\x0bProgressBar\x12\x0f\n\x07stageId\x18\x01 \x01(\x05\x12\x15\n\rstageProgress\x18\x02 
\x01(\x05\x32\xd3\x04\n\tScheduler\x12H\n\tgetConfig\x12\x1b.scheduler.GetConfigRequest\x1a\x1c.scheduler.GetConfigResponse\"\x00\x12\x33\n\x04post\x12\x12.commu.PostRequest\x1a\x13.commu.PostResponse\"\x00(\x01\x12>\n\x07\x63ontrol\x12\x17.control.ControlRequest\x1a\x18.control.ControlResponse\"\x00\x12\x39\n\x06status\x12\x15.status.StatusRequest\x1a\x16.status.StatusResponse\"\x00\x12]\n\x10getAlgorithmList\x12\".scheduler.GetAlgorithmListRequest\x1a#.scheduler.GetAlgorithmListResponse\"\x00\x12N\n\x0brecProgress\x12\x1d.scheduler.RecProgressRequest\x1a\x1e.scheduler.RecProgressResponse\"\x00\x12\x45\n\x08getStage\x12\x1a.scheduler.GetStageRequest\x1a\x1b.scheduler.GetStageResponse\"\x00\x12V\n\x0f\x63heckTaskConfig\x12\x1f.checker.CheckTaskConfigRequest\x1a .checker.CheckTaskConfigResponse\"\x00\x62\x06proto3')
_GETCONFIGREQUEST = DESCRIPTOR.message_types_by_name['GetConfigRequest']
_GETCONFIGRESPONSE = DESCRIPTOR.message_types_by_name['GetConfigResponse']
_DEFAULTCONFIG = DESCRIPTOR.message_types_by_name['DefaultConfig']
_DEFAULTCONFIG_CONFIGENTRY = _DEFAULTCONFIG.nested_types_by_name['ConfigEntry']
_GETALGORITHMLISTREQUEST = DESCRIPTOR.message_types_by_name['GetAlgorithmListRequest']
_GETALGORITHMLISTRESPONSE = DESCRIPTOR.message_types_by_name['GetAlgorithmListResponse']
_GETALGORITHMLISTRESPONSE_DEFAULTCONFIGMAPENTRY = _GETALGORITHMLISTRESPONSE.nested_types_by_name['DefaultConfigMapEntry']
_RECPROGRESSREQUEST = DESCRIPTOR.message_types_by_name['RecProgressRequest']
_RECPROGRESSRESPONSE = DESCRIPTOR.message_types_by_name['RecProgressResponse']
_GETSTAGEREQUEST = DESCRIPTOR.message_types_by_name['GetStageRequest']
_GETSTAGERESPONSE = DESCRIPTOR.message_types_by_name['GetStageResponse']
_PROGRESSBAR = DESCRIPTOR.message_types_by_name['ProgressBar']
GetConfigRequest = _reflection.GeneratedProtocolMessageType('GetConfigRequest', (_message.Message,), {
'DESCRIPTOR' : _GETCONFIGREQUEST,
'__module__' : 'scheduler_pb2'
# @@protoc_insertion_point(class_scope:scheduler.GetConfigRequest)
})
_sym_db.RegisterMessage(GetConfigRequest)
GetConfigResponse = _reflection.GeneratedProtocolMessageType('GetConfigResponse', (_message.Message,), {
'DESCRIPTOR' : _GETCONFIGRESPONSE,
'__module__' : 'scheduler_pb2'
# @@protoc_insertion_point(class_scope:scheduler.GetConfigResponse)
})
_sym_db.RegisterMessage(GetConfigResponse)
DefaultConfig = _reflection.GeneratedProtocolMessageType('DefaultConfig', (_message.Message,), {
'ConfigEntry' : _reflection.GeneratedProtocolMessageType('ConfigEntry', (_message.Message,), {
'DESCRIPTOR' : _DEFAULTCONFIG_CONFIGENTRY,
'__module__' : 'scheduler_pb2'
# @@protoc_insertion_point(class_scope:scheduler.DefaultConfig.ConfigEntry)
})
,
'DESCRIPTOR' : _DEFAULTCONFIG,
'__module__' : 'scheduler_pb2'
# @@protoc_insertion_point(class_scope:scheduler.DefaultConfig)
})
_sym_db.RegisterMessage(DefaultConfig)
_sym_db.RegisterMessage(DefaultConfig.ConfigEntry)
GetAlgorithmListRequest = _reflection.GeneratedProtocolMessageType('GetAlgorithmListRequest', (_message.Message,), {
'DESCRIPTOR' : _GETALGORITHMLISTREQUEST,
'__module__' : 'scheduler_pb2'
# @@protoc_insertion_point(class_scope:scheduler.GetAlgorithmListRequest)
})
_sym_db.RegisterMessage(GetAlgorithmListRequest)
GetAlgorithmListResponse = _reflection.GeneratedProtocolMessageType('GetAlgorithmListResponse', (_message.Message,), {
'DefaultConfigMapEntry' : _reflection.GeneratedProtocolMessageType('DefaultConfigMapEntry', (_message.Message,), {
'DESCRIPTOR' : _GETALGORITHMLISTRESPONSE_DEFAULTCONFIGMAPENTRY,
'__module__' : 'scheduler_pb2'
# @@protoc_insertion_point(class_scope:scheduler.GetAlgorithmListResponse.DefaultConfigMapEntry)
})
,
'DESCRIPTOR' : _GETALGORITHMLISTRESPONSE,
'__module__' : 'scheduler_pb2'
# @@protoc_insertion_point(class_scope:scheduler.GetAlgorithmListResponse)
})
_sym_db.RegisterMessage(GetAlgorithmListResponse)
_sym_db.RegisterMessage(GetAlgorithmListResponse.DefaultConfigMapEntry)
RecProgressRequest = _reflection.GeneratedProtocolMessageType('RecProgressRequest', (_message.Message,), {
'DESCRIPTOR' : _RECPROGRESSREQUEST,
'__module__' : 'scheduler_pb2'
# @@protoc_insertion_point(class_scope:scheduler.RecProgressRequest)
})
_sym_db.RegisterMessage(RecProgressRequest)
RecProgressResponse = _reflection.GeneratedProtocolMessageType('RecProgressResponse', (_message.Message,), {
'DESCRIPTOR' : _RECPROGRESSRESPONSE,
'__module__' : 'scheduler_pb2'
# @@protoc_insertion_point(class_scope:scheduler.RecProgressResponse)
})
_sym_db.RegisterMessage(RecProgressResponse)
GetStageRequest = _reflection.GeneratedProtocolMessageType('GetStageRequest', (_message.Message,), {
'DESCRIPTOR' : _GETSTAGEREQUEST,
'__module__' : 'scheduler_pb2'
# @@protoc_insertion_point(class_scope:scheduler.GetStageRequest)
})
_sym_db.RegisterMessage(GetStageRequest)
GetStageResponse = _reflection.GeneratedProtocolMessageType('GetStageResponse', (_message.Message,), {
'DESCRIPTOR' : _GETSTAGERESPONSE,
'__module__' : 'scheduler_pb2'
# @@protoc_insertion_point(class_scope:scheduler.GetStageResponse)
})
_sym_db.RegisterMessage(GetStageResponse)
ProgressBar = _reflection.GeneratedProtocolMessageType('ProgressBar', (_message.Message,), {
'DESCRIPTOR' : _PROGRESSBAR,
'__module__' : 'scheduler_pb2'
# @@protoc_insertion_point(class_scope:scheduler.ProgressBar)
})
_sym_db.RegisterMessage(ProgressBar)
_SCHEDULER = DESCRIPTOR.services_by_name['Scheduler']
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
_DEFAULTCONFIG_CONFIGENTRY._options = None
_DEFAULTCONFIG_CONFIGENTRY._serialized_options = b'8\001'
_GETALGORITHMLISTRESPONSE_DEFAULTCONFIGMAPENTRY._options = None
_GETALGORITHMLISTRESPONSE_DEFAULTCONFIGMAPENTRY._serialized_options = b'8\001'
_GETCONFIGREQUEST._serialized_start=87
_GETCONFIGREQUEST._serialized_end=138
_GETCONFIGRESPONSE._serialized_start=140
_GETCONFIGRESPONSE._serialized_end=221
_DEFAULTCONFIG._serialized_start=223
_DEFAULTCONFIG._serialized_end=339
_DEFAULTCONFIG_CONFIGENTRY._serialized_start=294
_DEFAULTCONFIG_CONFIGENTRY._serialized_end=339
_GETALGORITHMLISTREQUEST._serialized_start=341
_GETALGORITHMLISTREQUEST._serialized_end=366
_GETALGORITHMLISTRESPONSE._serialized_start=369
_GETALGORITHMLISTRESPONSE._serialized_end=600
_GETALGORITHMLISTRESPONSE_DEFAULTCONFIGMAPENTRY._serialized_start=519
_GETALGORITHMLISTRESPONSE_DEFAULTCONFIGMAPENTRY._serialized_end=600
_RECPROGRESSREQUEST._serialized_start=602
_RECPROGRESSREQUEST._serialized_end=657
_RECPROGRESSRESPONSE._serialized_start=659
_RECPROGRESSRESPONSE._serialized_end=694
_GETSTAGEREQUEST._serialized_start=696
_GETSTAGEREQUEST._serialized_end=728
_GETSTAGERESPONSE._serialized_start=731
_GETSTAGERESPONSE._serialized_end=900
_PROGRESSBAR._serialized_start=902
_PROGRESSBAR._serialized_end=955
_SCHEDULER._serialized_start=958
_SCHEDULER._serialized_end=1553
# @@protoc_insertion_point(module_scope)
| 9,417 | 58.607595 | 2,562 | py |
XFL | XFL-master/python/common/communication/gRPC/python/commu_pb2.py | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: commu.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x0b\x63ommu.proto\x12\x05\x63ommu\")\n\x0bPostRequest\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x0c\"\x1c\n\x0cPostResponse\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x62\x06proto3')
_POSTREQUEST = DESCRIPTOR.message_types_by_name['PostRequest']
_POSTRESPONSE = DESCRIPTOR.message_types_by_name['PostResponse']
PostRequest = _reflection.GeneratedProtocolMessageType('PostRequest', (_message.Message,), {
'DESCRIPTOR' : _POSTREQUEST,
'__module__' : 'commu_pb2'
# @@protoc_insertion_point(class_scope:commu.PostRequest)
})
_sym_db.RegisterMessage(PostRequest)
PostResponse = _reflection.GeneratedProtocolMessageType('PostResponse', (_message.Message,), {
'DESCRIPTOR' : _POSTRESPONSE,
'__module__' : 'commu_pb2'
# @@protoc_insertion_point(class_scope:commu.PostResponse)
})
_sym_db.RegisterMessage(PostResponse)
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
_POSTREQUEST._serialized_start=22
_POSTREQUEST._serialized_end=63
_POSTRESPONSE._serialized_start=65
_POSTRESPONSE._serialized_end=93
# @@protoc_insertion_point(module_scope)
| 1,674 | 36.222222 | 263 | py |
XFL | XFL-master/python/common/communication/gRPC/python/commu.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import pickle
import time
from typing import Any
from common.storage.redis.redis_conn import RedisConn
from common.utils.logger import logger
from service.fed_job import FedJob
from service.fed_node import FedNode
import commu_pb2
import scheduler_pb2_grpc
import trainer_pb2_grpc
MAX_BLOCK_SIZE = 1024 * 1024 # bytes
class Commu(object):
    """Implement peer to peer communication

    Outgoing messages go over gRPC (scheduler or trainer stub, chosen by the
    destination id); incoming messages are read from the local Redis store,
    where the gRPC server deposits them.
    """
    # Class-level federation topology, populated by __init__.
    fed_info = {}
    node = {}
    node_id = ""
    scheduler_id = ""
    # NOTE(review): initialized as "" but reassigned to a list of trainer ids
    # in __init__ — confirm nothing reads it before initialization.
    trainer_ids = ""

    @classmethod
    def __init__(cls, fed_info: dict):
        """Record the federation topology as class-level state.

        NOTE(review): this is a classmethod named __init__, so Commu(fed_info)
        configures the class itself rather than producing a usable instance.

        Args:
            fed_info (dict): federation description with keys "scheduler",
                "trainer" (mapping of trainer id -> node info) and "node_id".
        """
        # cls.* it to be deprecated
        # NOTE(review): assigns cls.federal_info although the class attribute
        # declared above is named fed_info — confirm which name callers read.
        cls.federal_info = fed_info
        cls.node = {}
        cls.node["scheduler"] = fed_info["scheduler"]
        cls.node.update(fed_info["trainer"])
        cls.node_id = fed_info["node_id"]
        cls.scheduler_id = "scheduler"
        cls.trainer_ids = list(fed_info["trainer"].keys())

    @classmethod
    def _get_channel(cls, remote_id: str):
        """Return a gRPC channel to ``remote_id``, created through FedNode."""
        return FedNode.create_channel(remote_id)

    @classmethod
    def get_job_id(cls):
        """Return the current job id held by FedJob."""
        return FedJob.job_id

    @classmethod
    def send(cls, key: str, value: Any, dst: str, use_pickle: bool = True) -> int:
        """Send ``value`` to node ``dst`` under message key ``key``.

        The payload is streamed in MAX_BLOCK_SIZE chunks; the RPC is retried
        indefinitely with exponential backoff (sleep doubling, capped once it
        reaches at least 30 s) until it succeeds.

        Args:
            key (str): message key identifying this transmission.
            value (Any): payload; pickled when ``use_pickle`` is True,
                otherwise it must already be bytes-like.
            dst (str): destination node id ("scheduler" or a trainer id).
            use_pickle (bool, optional): whether to pickle ``value``. Defaults to True.

        Returns:
            int: response code reported by the remote side (0 = success).
        """
        response = commu_pb2.PostResponse()
        channel = cls._get_channel(dst)
        # Scheduler and trainers expose the same streaming 'post' RPC on
        # different gRPC services.
        if dst == "scheduler":
            stub = scheduler_pb2_grpc.SchedulerStub(channel)
        else:
            stub = trainer_pb2_grpc.TrainerStub(channel)
        request = commu_pb2.PostRequest()
        request.key = key
        if use_pickle:
            value = pickle.dumps(value)
        logger.debug(f"len of send msg: {len(value)}")

        def request_generator():
            # Stream the payload in MAX_BLOCK_SIZE-sized slices, reusing the
            # same request object whose key was set once above.
            n = math.ceil(1.0 * len(value) / MAX_BLOCK_SIZE)
            for i in range(n):
                request.value = value[i*MAX_BLOCK_SIZE: (i+1)*MAX_BLOCK_SIZE]
                yield request

        retry_num = 1
        sleep_sec = 1
        while True:
            try:
                response = stub.post(request_generator())
                break
            except Exception as ex:
                logger.warning(ex, exc_info=True)
                logger.warning(f"Send data retry {retry_num}...")
                retry_num += 1
                time.sleep(sleep_sec)
                # Exponential backoff; stop growing once the sleep reaches 30s+.
                if sleep_sec < 30:
                    sleep_sec *= 2
        return response.code

    @classmethod
    def recv(cls, key: str, use_pickle: bool = True, wait: bool = True, default_value: Any = None) -> Any:
        """Fetch (and remove) the message stored under ``key`` in Redis.

        Args:
            key (str): message key to read.
            use_pickle (bool, optional): unpickle the stored bytes. Defaults to True.
            wait (bool, optional): block until the key exists; when False,
                return ``default_value`` immediately if it is absent.
            default_value (Any, optional): value returned on a non-blocking miss.

        Returns:
            Any: the unpickled payload, the raw bytes, or ``default_value``.
        """
        if wait:
            data = RedisConn.cut(key)
        else:
            data = RedisConn.cut_if_exist(key)
        if data is None:
            return default_value
        if use_pickle:
            return pickle.loads(data)
        else:
            return data
| 3,411 | 29.464286 | 106 | py |
XFL | XFL-master/python/common/communication/gRPC/python/__init__.py | 0 | 0 | 0 | py | |
XFL | XFL-master/python/common/communication/gRPC/python/trainer_pb2_grpc.py | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
import commu_pb2 as commu__pb2
import control_pb2 as control__pb2
import status_pb2 as status__pb2
class TrainerStub(object):
    """gRPC client stub for the trainer.Trainer service.

    NOTE: generated by the gRPC protocol compiler; regenerate from the .proto
    file instead of editing by hand.
    """

    def __init__(self, channel):
        """Constructor.
        Args:
            channel: A grpc.Channel.
        """
        # Client-streaming upload of chunked PostRequests, one PostResponse back.
        self.post = channel.stream_unary(
                '/trainer.Trainer/post',
                request_serializer=commu__pb2.PostRequest.SerializeToString,
                response_deserializer=commu__pb2.PostResponse.FromString,
                )
        # Unary control RPC.
        self.control = channel.unary_unary(
                '/trainer.Trainer/control',
                request_serializer=control__pb2.ControlRequest.SerializeToString,
                response_deserializer=control__pb2.ControlResponse.FromString,
                )
        # Unary status RPC.
        self.status = channel.unary_unary(
                '/trainer.Trainer/status',
                request_serializer=status__pb2.StatusRequest.SerializeToString,
                response_deserializer=status__pb2.StatusResponse.FromString,
                )
class TrainerServicer(object):
    """Server-side skeleton for the trainer.Trainer service.

    NOTE: generated by the gRPC protocol compiler; subclass and override the
    methods below with real implementations.
    """

    def post(self, request_iterator, context):
        """Client-streaming 'post' RPC; override in a subclass."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def control(self, request, context):
        """Unary 'control' RPC; override in a subclass."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def status(self, request, context):
        """Unary 'status' RPC; override in a subclass."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_TrainerServicer_to_server(servicer, server):
    """Register ``servicer``'s handlers for the trainer.Trainer service on ``server``.

    NOTE: generated by the gRPC protocol compiler.
    """
    rpc_method_handlers = {
            'post': grpc.stream_unary_rpc_method_handler(
                    servicer.post,
                    request_deserializer=commu__pb2.PostRequest.FromString,
                    response_serializer=commu__pb2.PostResponse.SerializeToString,
            ),
            'control': grpc.unary_unary_rpc_method_handler(
                    servicer.control,
                    request_deserializer=control__pb2.ControlRequest.FromString,
                    response_serializer=control__pb2.ControlResponse.SerializeToString,
            ),
            'status': grpc.unary_unary_rpc_method_handler(
                    servicer.status,
                    request_deserializer=status__pb2.StatusRequest.FromString,
                    response_serializer=status__pb2.StatusResponse.SerializeToString,
            ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
            'trainer.Trainer', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class Trainer(object):
    """Static one-shot invocation helpers for trainer.Trainer RPCs.

    NOTE: generated by the gRPC protocol compiler; part of an EXPERIMENTAL
    gRPC API that creates a channel per call from ``target``.
    """

    @staticmethod
    def post(request_iterator,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """One-shot client-streaming call to /trainer.Trainer/post."""
        return grpc.experimental.stream_unary(request_iterator, target, '/trainer.Trainer/post',
            commu__pb2.PostRequest.SerializeToString,
            commu__pb2.PostResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def control(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """One-shot unary call to /trainer.Trainer/control."""
        return grpc.experimental.unary_unary(request, target, '/trainer.Trainer/control',
            control__pb2.ControlRequest.SerializeToString,
            control__pb2.ControlResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def status(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """One-shot unary call to /trainer.Trainer/status."""
        return grpc.experimental.unary_unary(request, target, '/trainer.Trainer/status',
            status__pb2.StatusRequest.SerializeToString,
            status__pb2.StatusResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
| 5,347 | 38.614815 | 96 | py |
XFL | XFL-master/python/common/communication/gRPC/python/trainer_pb2.py | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: trainer.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
import commu_pb2 as commu__pb2
import status_pb2 as status__pb2
import control_pb2 as control__pb2
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\rtrainer.proto\x12\x07trainer\x1a\x0b\x63ommu.proto\x1a\x0cstatus.proto\x1a\rcontrol.proto2\xb9\x01\n\x07Trainer\x12\x33\n\x04post\x12\x12.commu.PostRequest\x1a\x13.commu.PostResponse\"\x00(\x01\x12>\n\x07\x63ontrol\x12\x17.control.ControlRequest\x1a\x18.control.ControlResponse\"\x00\x12\x39\n\x06status\x12\x15.status.StatusRequest\x1a\x16.status.StatusResponse\"\x00\x62\x06proto3')
_TRAINER = DESCRIPTOR.services_by_name['Trainer']
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
_TRAINER._serialized_start=69
_TRAINER._serialized_end=254
# @@protoc_insertion_point(module_scope)
| 1,286 | 40.516129 | 448 | py |
XFL | XFL-master/python/common/communication/gRPC/python/checker_pb2.py | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: checker.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\rchecker.proto\x12\x07\x63hecker\"\x1b\n\x0c\x44ictPathInfo\x12\x0b\n\x03key\x18\x01 \x01(\t\"\x1d\n\x0cListPathInfo\x12\r\n\x05index\x18\x01 \x01(\x05\"l\n\x08PathInfo\x12)\n\x08\x64ictPath\x18\x01 \x01(\x0b\x32\x15.checker.DictPathInfoH\x00\x12)\n\x08listPath\x18\x02 \x01(\x0b\x32\x15.checker.ListPathInfoH\x00\x42\n\n\x08pathInfo\">\n\x08ItemInfo\x12#\n\x08pathInfo\x18\x01 \x03(\x0b\x32\x11.checker.PathInfo\x12\r\n\x05notes\x18\x02 \x01(\t\"N\n\x16\x43rossStagePositionInfo\x12\x0f\n\x07stageId\x18\x01 \x01(\x05\x12#\n\x08pathInfo\x18\x02 \x03(\x0b\x32\x11.checker.PathInfo\"`\n\x12\x43rossStageItemInfo\x12\x13\n\x0b\x64umpedValue\x18\x01 \x01(\t\x12\x35\n\x0cpositionList\x18\x02 \x03(\x0b\x32\x1f.checker.CrossStagePositionInfo\"\x9f\x01\n\x0bStageResult\x12\x0f\n\x07stageId\x18\x01 \x01(\x05\x12\x1b\n\x13\x64umpedCheckedConfig\x18\x02 \x01(\t\x12)\n\x0eunmatchedItems\x18\x03 \x03(\x0b\x32\x11.checker.ItemInfo\x12\x13\n\x0bpassedRules\x18\x04 \x01(\x05\x12\x14\n\x0c\x63heckedRules\x18\x05 \x01(\x05\x12\x0c\n\x04\x63ode\x18\x06 \x01(\x05\"O\n\x10MultiStageResult\x12-\n\x0fstageResultList\x18\x01 \x03(\x0b\x32\x14.checker.StageResult\x12\x0c\n\x04\x63ode\x18\x02 \x01(\x05\"\xca\x01\n\x10\x43rossStageResult\x12:\n\x15\x64uplicatedInputOutput\x18\x01 \x03(\x0b\x32\x1b.checker.CrossStageItemInfo\x12\x35\n\x10\x62lankInputOutput\x18\x02 \x03(\x0b\x32\x1b.checker.CrossStageItemInfo\x12\x35\n\x10nonexistentInput\x18\x03 \x03(\x0b\x32\x1b.checker.CrossStageItemInfo\x12\x0c\n\x04\x63ode\x18\x04 \x01(\x05\"M\n\x16\x43heckTaskConfigRequest\x12\x19\n\x11\x64umpedTrainConfig\x18\x01 \x01(\t\x12\x18\n\x10\x65xistedInputPath\x18\x02 \x03(\t\"\xa2\x01\n\x17\x43heckTaskConfigResponse\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x0f\n\x07message\x18\x02 \x01(\t\x12\x33\n\x10multiStageResult\x18\x03 \x01(\x0b\x32\x19.checker.MultiStageResult\x12\x33\n\x10\x63rossStageResult\x18\x04 
\x01(\x0b\x32\x19.checker.CrossStageResultb\x06proto3')
_DICTPATHINFO = DESCRIPTOR.message_types_by_name['DictPathInfo']
_LISTPATHINFO = DESCRIPTOR.message_types_by_name['ListPathInfo']
_PATHINFO = DESCRIPTOR.message_types_by_name['PathInfo']
_ITEMINFO = DESCRIPTOR.message_types_by_name['ItemInfo']
_CROSSSTAGEPOSITIONINFO = DESCRIPTOR.message_types_by_name['CrossStagePositionInfo']
_CROSSSTAGEITEMINFO = DESCRIPTOR.message_types_by_name['CrossStageItemInfo']
_STAGERESULT = DESCRIPTOR.message_types_by_name['StageResult']
_MULTISTAGERESULT = DESCRIPTOR.message_types_by_name['MultiStageResult']
_CROSSSTAGERESULT = DESCRIPTOR.message_types_by_name['CrossStageResult']
_CHECKTASKCONFIGREQUEST = DESCRIPTOR.message_types_by_name['CheckTaskConfigRequest']
_CHECKTASKCONFIGRESPONSE = DESCRIPTOR.message_types_by_name['CheckTaskConfigResponse']
DictPathInfo = _reflection.GeneratedProtocolMessageType('DictPathInfo', (_message.Message,), {
'DESCRIPTOR' : _DICTPATHINFO,
'__module__' : 'checker_pb2'
# @@protoc_insertion_point(class_scope:checker.DictPathInfo)
})
_sym_db.RegisterMessage(DictPathInfo)
ListPathInfo = _reflection.GeneratedProtocolMessageType('ListPathInfo', (_message.Message,), {
'DESCRIPTOR' : _LISTPATHINFO,
'__module__' : 'checker_pb2'
# @@protoc_insertion_point(class_scope:checker.ListPathInfo)
})
_sym_db.RegisterMessage(ListPathInfo)
PathInfo = _reflection.GeneratedProtocolMessageType('PathInfo', (_message.Message,), {
'DESCRIPTOR' : _PATHINFO,
'__module__' : 'checker_pb2'
# @@protoc_insertion_point(class_scope:checker.PathInfo)
})
_sym_db.RegisterMessage(PathInfo)
ItemInfo = _reflection.GeneratedProtocolMessageType('ItemInfo', (_message.Message,), {
'DESCRIPTOR' : _ITEMINFO,
'__module__' : 'checker_pb2'
# @@protoc_insertion_point(class_scope:checker.ItemInfo)
})
_sym_db.RegisterMessage(ItemInfo)
CrossStagePositionInfo = _reflection.GeneratedProtocolMessageType('CrossStagePositionInfo', (_message.Message,), {
'DESCRIPTOR' : _CROSSSTAGEPOSITIONINFO,
'__module__' : 'checker_pb2'
# @@protoc_insertion_point(class_scope:checker.CrossStagePositionInfo)
})
_sym_db.RegisterMessage(CrossStagePositionInfo)
CrossStageItemInfo = _reflection.GeneratedProtocolMessageType('CrossStageItemInfo', (_message.Message,), {
'DESCRIPTOR' : _CROSSSTAGEITEMINFO,
'__module__' : 'checker_pb2'
# @@protoc_insertion_point(class_scope:checker.CrossStageItemInfo)
})
_sym_db.RegisterMessage(CrossStageItemInfo)
StageResult = _reflection.GeneratedProtocolMessageType('StageResult', (_message.Message,), {
'DESCRIPTOR' : _STAGERESULT,
'__module__' : 'checker_pb2'
# @@protoc_insertion_point(class_scope:checker.StageResult)
})
_sym_db.RegisterMessage(StageResult)
MultiStageResult = _reflection.GeneratedProtocolMessageType('MultiStageResult', (_message.Message,), {
'DESCRIPTOR' : _MULTISTAGERESULT,
'__module__' : 'checker_pb2'
# @@protoc_insertion_point(class_scope:checker.MultiStageResult)
})
_sym_db.RegisterMessage(MultiStageResult)
CrossStageResult = _reflection.GeneratedProtocolMessageType('CrossStageResult', (_message.Message,), {
'DESCRIPTOR' : _CROSSSTAGERESULT,
'__module__' : 'checker_pb2'
# @@protoc_insertion_point(class_scope:checker.CrossStageResult)
})
_sym_db.RegisterMessage(CrossStageResult)
CheckTaskConfigRequest = _reflection.GeneratedProtocolMessageType('CheckTaskConfigRequest', (_message.Message,), {
'DESCRIPTOR' : _CHECKTASKCONFIGREQUEST,
'__module__' : 'checker_pb2'
# @@protoc_insertion_point(class_scope:checker.CheckTaskConfigRequest)
})
_sym_db.RegisterMessage(CheckTaskConfigRequest)
CheckTaskConfigResponse = _reflection.GeneratedProtocolMessageType('CheckTaskConfigResponse', (_message.Message,), {
'DESCRIPTOR' : _CHECKTASKCONFIGRESPONSE,
'__module__' : 'checker_pb2'
# @@protoc_insertion_point(class_scope:checker.CheckTaskConfigResponse)
})
_sym_db.RegisterMessage(CheckTaskConfigResponse)
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
_DICTPATHINFO._serialized_start=26
_DICTPATHINFO._serialized_end=53
_LISTPATHINFO._serialized_start=55
_LISTPATHINFO._serialized_end=84
_PATHINFO._serialized_start=86
_PATHINFO._serialized_end=194
_ITEMINFO._serialized_start=196
_ITEMINFO._serialized_end=258
_CROSSSTAGEPOSITIONINFO._serialized_start=260
_CROSSSTAGEPOSITIONINFO._serialized_end=338
_CROSSSTAGEITEMINFO._serialized_start=340
_CROSSSTAGEITEMINFO._serialized_end=436
_STAGERESULT._serialized_start=439
_STAGERESULT._serialized_end=598
_MULTISTAGERESULT._serialized_start=600
_MULTISTAGERESULT._serialized_end=679
_CROSSSTAGERESULT._serialized_start=682
_CROSSSTAGERESULT._serialized_end=884
_CHECKTASKCONFIGREQUEST._serialized_start=886
_CHECKTASKCONFIGREQUEST._serialized_end=963
_CHECKTASKCONFIGRESPONSE._serialized_start=966
_CHECKTASKCONFIGRESPONSE._serialized_end=1128
# @@protoc_insertion_point(module_scope)
| 7,401 | 53.82963 | 2,011 | py |
XFL | XFL-master/python/common/communication/gRPC/python/control_pb2.py | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: control.proto
"""Generated protocol buffer code."""
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\rcontrol.proto\x12\x07\x63ontrol\"5\n\x0e\x43ontrolRequest\x12#\n\x07\x63ontrol\x18\x01 \x01(\x0e\x32\x12.control.Operation\".\n\x0bNodeLogPath\x12\x0e\n\x06nodeId\x18\x01 \x01(\t\x12\x0f\n\x07logPath\x18\x02 \x01(\t\"D\n\x10StageNodeLogPath\x12\x0f\n\x07stageId\x18\x01 \x01(\x05\x12\x0e\n\x06nodeId\x18\x02 \x01(\t\x12\x0f\n\x07logPath\x18\x03 \x01(\t\"\xba\x01\n\x0f\x43ontrolResponse\x12\r\n\x05jobId\x18\x01 \x01(\x05\x12\x0c\n\x04\x63ode\x18\x02 \x01(\x05\x12\x0f\n\x07message\x18\x03 \x01(\t\x12\x19\n\x11\x64umpedTrainConfig\x18\x04 \x01(\t\x12)\n\x0bnodeLogPath\x18\x05 \x03(\x0b\x32\x14.control.NodeLogPath\x12\x33\n\x10stageNodeLogPath\x18\x06 \x03(\x0b\x32\x19.control.StageNodeLogPath*F\n\tOperation\x12\r\n\tOPERATION\x10\x00\x12\t\n\x05START\x10\x01\x12\x08\n\x04STOP\x10\x02\x12\t\n\x05PAUSE\x10\x03\x12\n\n\x06UPDATE\x10\x04\x62\x06proto3')
_OPERATION = DESCRIPTOR.enum_types_by_name['Operation']
Operation = enum_type_wrapper.EnumTypeWrapper(_OPERATION)
OPERATION = 0
START = 1
STOP = 2
PAUSE = 3
UPDATE = 4
_CONTROLREQUEST = DESCRIPTOR.message_types_by_name['ControlRequest']
_NODELOGPATH = DESCRIPTOR.message_types_by_name['NodeLogPath']
_STAGENODELOGPATH = DESCRIPTOR.message_types_by_name['StageNodeLogPath']
_CONTROLRESPONSE = DESCRIPTOR.message_types_by_name['ControlResponse']
ControlRequest = _reflection.GeneratedProtocolMessageType('ControlRequest', (_message.Message,), {
'DESCRIPTOR' : _CONTROLREQUEST,
'__module__' : 'control_pb2'
# @@protoc_insertion_point(class_scope:control.ControlRequest)
})
_sym_db.RegisterMessage(ControlRequest)
NodeLogPath = _reflection.GeneratedProtocolMessageType('NodeLogPath', (_message.Message,), {
'DESCRIPTOR' : _NODELOGPATH,
'__module__' : 'control_pb2'
# @@protoc_insertion_point(class_scope:control.NodeLogPath)
})
_sym_db.RegisterMessage(NodeLogPath)
StageNodeLogPath = _reflection.GeneratedProtocolMessageType('StageNodeLogPath', (_message.Message,), {
'DESCRIPTOR' : _STAGENODELOGPATH,
'__module__' : 'control_pb2'
# @@protoc_insertion_point(class_scope:control.StageNodeLogPath)
})
_sym_db.RegisterMessage(StageNodeLogPath)
ControlResponse = _reflection.GeneratedProtocolMessageType('ControlResponse', (_message.Message,), {
'DESCRIPTOR' : _CONTROLRESPONSE,
'__module__' : 'control_pb2'
# @@protoc_insertion_point(class_scope:control.ControlResponse)
})
_sym_db.RegisterMessage(ControlResponse)
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
_OPERATION._serialized_start=388
_OPERATION._serialized_end=458
_CONTROLREQUEST._serialized_start=26
_CONTROLREQUEST._serialized_end=79
_NODELOGPATH._serialized_start=81
_NODELOGPATH._serialized_end=127
_STAGENODELOGPATH._serialized_start=129
_STAGENODELOGPATH._serialized_end=197
_CONTROLRESPONSE._serialized_start=200
_CONTROLRESPONSE._serialized_end=386
# @@protoc_insertion_point(module_scope)
| 3,522 | 45.973333 | 920 | py |
XFL | XFL-master/python/common/storage/__init__.py | 0 | 0 | 0 | py | |
XFL | XFL-master/python/common/storage/redis/redis_conn.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
from typing import Any
import redis
from common.utils.config import load_json_config
from service.fed_node import FedNode
class RedisConn(object):
    """Process-wide Redis access layer used as a shared data pool.

    All state lives in class attributes and all operations are classmethods;
    callers are expected to invoke :meth:`init_redis` once before use.
    """
    # Settings loaded from data_pool_config.json in init_redis().
    redis_config = {}
    # Polling interval / overall timeout (seconds) used by cut().
    retry_interval = None
    retry_duration = None
    # Placeholder client (library defaults); replaced by a pooled client in init_redis().
    rs = redis.StrictRedis()
    redis_host = ""
    @classmethod
    def init_redis(cls):
        """Load config, build the pooled Redis client and init the job counter."""
        config = load_json_config(os.path.abspath(
            os.path.join(os.path.dirname(__file__), "../../../", "common/storage/redis/data_pool_config.json")))
        cls.redis_config = config["redis"]
        # Host/port come from FedNode (cluster config), overriding the JSON values.
        cls.redis_host = FedNode.redis_host # or cls.redis_config.get("host")
        cls.redis_port = FedNode.redis_port # or cls.redis_config.get("port")
        cls.redis_config["host"] = cls.redis_host
        cls.redis_config["port"] = cls.redis_port
        cls.retry_interval = config.get("retry_interval")
        cls.retry_duration = config.get("retry_duration")
        # decode_responses=False: get() returns raw bytes, not str.
        pool = redis.ConnectionPool(host=cls.redis_config["host"], port=cls.redis_config["port"],
                                    db=0, decode_responses=False)
        cls.rs = redis.StrictRedis(connection_pool=pool)
        cls.init_job_id()
    @classmethod
    def init_job_id(cls):
        # Create the global job-id counter once; advanced via incr().
        if cls.rs.get("XFL_JOB_ID") is None:
            cls.rs.set("XFL_JOB_ID", 0)
    @classmethod
    def put(cls, key: str, value: Any) -> int:
        """Set *key* with the TTL configured as redis_config["expire_seconds"]."""
        status = cls.rs.set(key, value, ex=cls.redis_config["expire_seconds"])
        return status
    @classmethod
    def set(cls, key: str, value: Any, ex=-1) -> int:
        """Set *key*; apply expiry only when ex > 0 (non-positive means no TTL)."""
        if ex > 0:
            return cls.rs.set(key, value, ex)
        else:
            return cls.rs.set(key, value)
    @classmethod
    def get(cls, key: str) -> Any:
        """Return the raw value stored at *key* (bytes), or None if absent."""
        return cls.rs.get(key)
    @classmethod
    def incr(cls, key: str):
        """Atomically increment the integer stored at *key*."""
        return cls.rs.incr(key)
    @classmethod
    def cut(cls, key: str) -> Any:
        """Blocking pop: poll until *key* exists, then get-and-delete it.

        Raises KeyError after retry_duration seconds without the key.
        NOTE(review): exists/get/delete is not atomic — presumably each key
        has a single consumer; confirm before reusing elsewhere.
        """
        start = time.time()
        while True:
            if cls.rs.exists(key):
                res = cls.rs.get(key)
                cls.rs.delete(key)
                return res
            time.sleep(cls.retry_interval)
            if (time.time() - start) > cls.retry_duration:
                raise KeyError(f"Retry Timeout, Key {key} not found")
    @classmethod
    def cut_if_exist(cls, key: str) -> Any:
        """Non-blocking pop: get-and-delete *key* if present, else return None."""
        time.sleep(1e-6)  # brief yield before probing
        if cls.rs.exists(key):
            res = cls.rs.get(key)
            cls.rs.delete(key)
            return res
        else:
            return None
| 3,077 | 30.408163 | 112 | py |
XFL | XFL-master/python/common/storage/redis/__init__.py | 0 | 0 | 0 | py | |
XFL | XFL-master/python/common/checker/x_types.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .base import Base
class String(Base):
    """Descriptor type matching ``str`` values."""

    def __init__(self, default: str = ""):
        super().__init__()
        self.default = default
class Bool(Base):
    """Descriptor type matching ``bool`` values."""

    def __init__(self, default: bool = True):
        super().__init__()
        self.default = default
class Integer(Base):
    """Descriptor type matching ``int`` values, with optional bound rules."""

    def __init__(self, default: int = 0):
        super().__init__()
        self.default = default

    def gt(self, value):
        """Require checked values to be strictly greater than *value*."""
        return self.add_rule(lambda x: x > value, f"greater than {value}")

    def ge(self, value):
        """Require checked values to be at least *value*."""
        return self.add_rule(lambda x: x >= value, f"greater equal than {value}")

    def lt(self, value):
        """Require checked values to be strictly less than *value*."""
        return self.add_rule(lambda x: x < value, f"less than {value}")

    def le(self, value):
        """Require checked values to be at most *value*."""
        return self.add_rule(lambda x: x <= value, f"less equal than {value}")
class Float(Base):
    """Descriptor type matching ``float`` values, with optional bound rules."""

    def __init__(self, default: float = 0):
        super().__init__()
        self.default = default

    def gt(self, value):
        """Require checked values to be strictly greater than *value*."""
        return self.add_rule(lambda x: x > value, f"greater than {value}")

    def ge(self, value):
        """Require checked values to be at least *value*."""
        return self.add_rule(lambda x: x >= value, f"greater equal than {value}")

    def lt(self, value):
        """Require checked values to be strictly less than *value*."""
        return self.add_rule(lambda x: x < value, f"less than {value}")

    def le(self, value):
        """Require checked values to be at most *value*."""
        return self.add_rule(lambda x: x <= value, f"less equal than {value}")
class Any(Base):
    """Descriptor matching any value EXCEPT containers (list/dict/tuple)."""

    def __init__(self, default=None):
        super().__init__()
        self.default = default

    def __eq__(self, __o: object) -> bool:
        # Containers never compare equal to Any; everything else does.
        return not isinstance(__o, (list, dict, tuple))

    def __hash__(self) -> int:
        # Deterministic hash derived from the class name (same scheme as Base).
        digits = (('%.3d' % ord(ch)) for ch in self.__name__ + "1234567890")
        return int(''.join(digits))
class All(Base):
    """Descriptor matching absolutely any value, containers included."""

    def __init__(self, default=None):
        super().__init__()
        self.default = default

    def __eq__(self, __o: object) -> bool:
        # All compares equal to everything.
        return True

    def __hash__(self) -> int:
        # Deterministic hash derived from the class name (same scheme as Base).
        digits = (('%.3d' % ord(ch)) for ch in self.__name__ + "1234567890")
        return int(''.join(digits))
XFL | XFL-master/python/common/checker/base.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
OneOf, AtLeastOneOf, RepeatableAtLeastOneOf, Required, Optional, Any
String, Bool, Integer, Float
"""
from typing import Callable
class Base(object):
def __init__(self):
self.default = None
self.rules = []
self.checked = []
# def set_default(self, *value):
# if len(value) == 1:
# self.default = value[0]
# else:
# self.default = value
# return self
def add_rule(self, rule: Callable, desp: str = ''):
'''
rule 可以接受一个参数,也可以接受两个参数。第一个参数表示当前位置的值,第二参数表示要检查的config。
'''
self.rules.append((rule, desp))
return self
# def check(self): # TODO: add traceback
# self.checked = []
# for rule, desp in self.rules:
# try:
# is_pass = rule(self.value)
# except Exception:
# is_pass = False
# self.checked.append((is_pass, desp))
# return self.checked
@property
def __name__(self):
return self.__class__.__name__
def __hash__(self) -> int:
return int(''.join(map(lambda x: '%.3d' % ord(x), self.__name__ + "1234567890")))
| 1,807 | 27.698413 | 89 | py |
XFL | XFL-master/python/common/checker/get_default.py | import copy
from .qualifiers import OneOf, SomeOf, RepeatableSomeOf, Required, Optional
from .x_types import String, Bool, Integer, Float, Any, All
def get_default(descriptor):
    """Recursively build a default config from a rule *descriptor*.

    A dict descriptor may carry a ``"__rule__"`` entry (a list of
    qualifiers such as Required/OneOf/SomeOf/Optional) selecting which
    keys are emitted; when absent, every key is treated as Required.
    Lists, qualifiers and scalar type descriptors resolve to their
    ``default`` values.
    """
    if isinstance(descriptor, dict):
        if "__rule__" not in descriptor:
            # No explicit rule: treat every key as Required and recurse.
            descriptor_copy = copy.deepcopy(descriptor)
            descriptor_copy["__rule__"] = [Required(*list(descriptor.keys()))]
            return get_default(descriptor_copy)
        else:
            if not isinstance(descriptor.get("__rule__"), list):
                # Normalize a single qualifier into a one-element rule list.
                descriptor_copy = copy.deepcopy(descriptor)
                descriptor_copy["__rule__"] = [descriptor["__rule__"]]
                return get_default(descriptor_copy)
            else:
                # Expand Optional qualifiers carrying a non-None default,
                # then recurse on the rewritten rule list.
                descriptor_copy = copy.deepcopy(descriptor)
                is_continue = True
                for i, item in enumerate(descriptor["__rule__"]):
                    if isinstance(item, Optional):
                        if item.default is not None:
                            descriptor_copy["__rule__"][i] = item.default
                            is_continue = False
                if not is_continue:
                    return get_default(descriptor_copy)
                else:
                    # Emit one entry per qualifier according to its semantics.
                    res = {}
                    for item in descriptor["__rule__"]:
                        if isinstance(item, OneOf):
                            key = get_default(item.default)
                            res[key] = get_default(descriptor[key])
                        elif isinstance(item, SomeOf):
                            if isinstance(item.default, (list, tuple)):
                                for k in item.default:
                                    key = get_default(k)
                                    res[key] = get_default(descriptor[key])
                            else:
                                key = get_default(item.default)
                                res[key] = get_default(descriptor[key])
                        elif isinstance(item, Required):
                            # All candidates of a Required rule are emitted.
                            for k in item.default:
                                key = get_default(k)
                                res[key] = get_default(descriptor[key])
                        elif isinstance(item, Optional):
                            # Optional without a default contributes nothing.
                            if item.default is None:
                                pass
                            else:
                                raise ValueError("Code is not well developed.")
                        # else:
                        #     res[get_default(item.default)] = descriptor[item.default]
                        # # res[item.default] = get_default(descriptor[item.default])
                        #     if isinstance(item.default, (str, int, float)):
                        #         res[item.default] = get_default(descriptor[item.default])
                        #     elif isinstance(item, (String, Integer, Float)):
                        #         res[get_default(item.default)] = get_default(descriptor[item.default])
                        #     elif isinstance(item.default, (bool, Bool)):
                        #         raise ValueError("Rule is not set correctly.")
                        #     else:
                        #         raise ValueError("Code is not well developed.")
                        #     for k in item.default:
                        #         key = get_default(k)
                        #         res[key] = get_default(descriptor[key])
                        elif isinstance(item, RepeatableSomeOf):
                            raise ValueError("Rule is not set correctly.")
                        elif isinstance(item, (String, Bool, Integer, Float)):
                            # Typed key: find the descriptor key of the same
                            # type (matched via the type-derived hash).
                            # NOTE(review): assumes such a key exists; if not,
                            # key2 stays None and the lookup below raises —
                            # confirm rule authors guarantee this.
                            key = get_default(item.default)
                            key2 = None
                            for k in descriptor.keys():
                                if k.__hash__() == item.__class__().__hash__():
                                    key2 = k
                            res[key] = get_default(descriptor[key2])
                        elif isinstance(item, Any):
                            pass
                        else:
                            res[item] = get_default(descriptor[item])
                    return res
    elif isinstance(descriptor, list):
        if len(descriptor) == 0:
            return []
        elif len(descriptor) == 1:
            # A single-element list may hold a qualifier governing the list.
            if isinstance(descriptor[0], OneOf):
                return [get_default(descriptor[0].default)]
            elif isinstance(descriptor[0], (SomeOf, RepeatableSomeOf)):
                if descriptor[0].default is None:
                    return []
                else:
                    return [get_default(v) for v in descriptor[0].default]
            elif isinstance(descriptor[0], Required):
                raise ValueError("Rule is not set correctly.")
            elif isinstance(descriptor[0], Optional):
                if descriptor[0].default is None:
                    return []
                else:
                    return get_default([descriptor[0].default])
            elif isinstance(descriptor[0], (String, Bool, Integer, Float)):
                return [descriptor[0].default]
            elif isinstance(descriptor[0], Any):
                return []
            else:
                return [get_default(v) for v in descriptor]
        else:
            return [get_default(v) for v in descriptor]
    elif isinstance(descriptor, (OneOf, Optional)):
        return get_default(descriptor.default)
    elif isinstance(descriptor, (SomeOf, RepeatableSomeOf, Required)):
        raise ValueError("Rule is not set correctly.")
    elif isinstance(descriptor, (String, Bool, Integer, Float)):
        return descriptor.default
    elif isinstance(descriptor, (Any, All)):
        return None
    else:
        # Plain literal value: it is its own default.
        return descriptor
| 6,097 | 48.983607 | 108 | py |
XFL | XFL-master/python/common/checker/qualifiers.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .base import Base
class OfBase(Base):
    """Common base for qualifier types (OneOf, SomeOf, Required, Optional...).

    Stores the candidate values and an optional default.
    """

    def __init__(self, *args, default=None):
        super().__init__()
        self.candidates = args
        # Bug fix: the ``default`` keyword used to be discarded here
        # (``self.default`` was unconditionally reset to None), so e.g.
        # ``OneOf(a, b, default=b)`` silently fell back to candidate 0.
        # Honoring it is backward compatible: the keyword defaults to None.
        self.default = default

    def set_default(self, value):
        '''Set the default value; returns ``self`` for chaining.'''
        self.default = value
        return self
class OneOf(OfBase):
    """Qualifier: exactly one of the candidates must match."""

    def __init__(self, *args, default=None):
        super().__init__(*args, default=default)
        # Fall back to the first candidate when no default survived.
        if self.default is None:
            self.default = self.candidates[0]

    def set_default_index(self, idx):
        """Choose the default by candidate index; returns ``self`` for chaining."""
        self.default = self.candidates[idx]
        return self
class SomeOf(OfBase):
    """Qualifier: pick several candidates WITHOUT replacement
    (at least one, at most the number of candidates)."""

    def __init__(self, *args, default=None):
        super().__init__(*args, default=default)
        # Default to a one-element list holding the first candidate.
        if self.default is None:
            self.default = [self.candidates[0]]

    def set_default_indices(self, *idx):
        """Choose the defaults by candidate indices; returns ``self`` for chaining."""
        self.default = [self.candidates[i] for i in idx]
        return self
# repeatable, many
class RepeatableSomeOf(OfBase):
    """Qualifier: pick several candidates WITH replacement."""

    def __init__(self, *args, default=None):
        super().__init__(*args, default=default)
        # Default to a one-element list holding the first candidate.
        if self.default is None:
            self.default = [self.candidates[0]]

    def set_default_indices(self, *idx):
        """Choose the defaults by candidate indices; returns ``self`` for chaining."""
        self.default = [self.candidates[i] for i in idx]
        return self
class Required(OfBase):
    """Qualifier: every candidate must be present."""

    def __init__(self, *args):
        super().__init__(*args)
        # All candidates are the default set.
        self.default = args
class Optional(OfBase):
    """Qualifier marking a value as optional.

    In a dict's ``__rule__`` it means the key may be absent; in a list it
    means the list may be empty; elsewhere it means the value may be None.
    Note: Optional accepts a single candidate only.
    """

    def __init__(self, *args, default=None):
        super().__init__(*args, default=default)

    def set_default_not_none(self):
        """Use the (single) candidate as a non-None default; returns ``self``."""
        self.default = self.candidates[0]
        return self
# class Any(OfBase):
# '''
# 任意值
# '''
# def __init__(self, default=None):
# super().__init__(default=default)
# def __eq__(self, __o: object) -> bool:
# return True
# def __hash__(self) -> int:
# return int(''.join(map(lambda x: '%.3d' % ord(x), self.__name__() + "1234567890")))
| 3,682 | 25.883212 | 93 | py |
XFL | XFL-master/python/common/checker/matcher.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .checker import check, Checked
# For sync in algorithms
def get_matched_config(config, rule):
    """Run ``check`` and keep only the parts of *config* that matched *rule*.

    Used to synchronise configurations between parties in algorithms.
    Unmatched scalar positions become None; dict entries whose key did not
    match are dropped entirely.
    """
    checked_tree = check(config, rule)

    def _extract(node):
        if not isinstance(node, Checked):
            return node
        if isinstance(node.value, dict):
            out = {}
            for key, val in node.value.items():
                if hasattr(key, 'is_match'):
                    # Checked key: keep the entry only when the key matched.
                    if key.is_match:
                        out[key.value] = _extract(val)
                else:
                    out[key] = _extract(val)
            return out
        return node.value if node.is_match else None

    return _extract(checked_tree)
| 1,420 | 31.295455 | 74 | py |
XFL | XFL-master/python/common/checker/checker.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import traceback
import numpy as np
from .qualifiers import OneOf, SomeOf, RepeatableSomeOf, Required, Optional
from .x_types import String, Bool, Integer, Float, Any, All
class Checked():
    """Node in the result tree produced by ``check``.

    ``value`` mirrors the shape of the checked config: a dict (whose keys
    and values may themselves be ``Checked``), a list of ``Checked``, a
    nested ``Checked``, or a plain scalar.  ``is_match``/``reason`` record
    whether (and why not) this node matched its rule.
    """

    def __init__(self, value, is_match: bool, reason: str = ''):
        self.value = value
        self.is_match = is_match
        self.reason = reason

    def result(self):
        """Render the full result tree, annotating every node.

        Dicts gain a ``"__match__"`` entry, lists a leading ``"__match__"``
        string, scalars become ``"(value)-Match/Not Match-reason"``.
        """
        is_match = "Match" if self.is_match else "Not Match"
        if isinstance(self.value, dict):
            res = {
                "__match__": ':'.join([is_match, self.reason])
            }
            res.update(
                {
                    k.result() if isinstance(k, Checked) else k: v.result() if isinstance(v, Checked) else v for k, v in self.value.items()
                }
            )
        elif isinstance(self.value, list):
            res = ['__match__: ' + '-'.join([is_match, self.reason])] + [v.result() if isinstance(v, Checked) else v for v in self.value]
        elif isinstance(self.value, Checked):
            res = self.value.result()
        else:
            res = '-'.join(['(' + str(self.value) + ')', is_match, self.reason])
        return res

    def breif_result(self):
        """Like :meth:`result`, but annotate only the UNMATCHED nodes.

        (Method name kept as-is — "breif" — for backward compatibility.)
        """
        is_match = "Match" if self.is_match else "Not Match"
        if isinstance(self.value, dict):
            precise_res = {}
            if not self.is_match:
                precise_res["__match__"] = ':'.join([is_match, self.reason])
            precise_res.update(
                {
                    k.breif_result() if isinstance(k, Checked) else k: v.breif_result() if isinstance(v, Checked) else v for k, v in self.value.items()
                }
            )
        elif isinstance(self.value, list):
            precise_res = [v.breif_result() if isinstance(v, Checked) else v for v in self.value]
            if not self.is_match:
                precise_res.insert(0, '__match__: ' + '-'.join([is_match, self.reason]))
        elif isinstance(self.value, Checked):
            precise_res = self.value.breif_result()
        else:
            if not self.is_match:
                precise_res = '-'.join(['(' + str(self.value) + ')', is_match, self.reason])
            else:
                precise_res = self.value
        return precise_res

    def _get_real_value(self, value):
        # Unwrap arbitrarily nested Checked wrappers down to the raw value.
        if isinstance(value, Checked):
            return self._get_real_value(value.value)
        else:
            return value

    def get_unmatch_position(self, position_chain=None):
        """Collect the paths of all unmatched nodes.

        Returns a list of chains shaped like
        ``[{'type': 'dict', 'key': 'aaa'}, {'type': 'list', 'index': 1}, reason]``.

        Fix: ``position_chain`` previously used a mutable default argument
        ``[]``; it is now None-guarded (behavior unchanged — the list was
        never mutated in place, only concatenated).
        """
        if position_chain is None:
            position_chain = []
        position = []
        if isinstance(self.value, dict):
            if not self.is_match:
                position.append(position_chain + [self.reason])
            for k, v in self.value.items():
                if isinstance(k, Checked):
                    r = k.get_unmatch_position(position_chain)
                    if r != []:
                        position += r
                if isinstance(v, Checked):
                    # Assumes dict keys are Checked whenever values are —
                    # matches the trees that check() builds.
                    r = v.get_unmatch_position(position_chain + [{'type': 'dict', 'key': self._get_real_value(k.value)}])
                    if r != []:
                        position += r
        elif isinstance(self.value, list):
            if not self.is_match:
                position.append(position_chain + [self.reason])
            for i, v in enumerate(self.value):
                if isinstance(v, Checked):
                    r = v.get_unmatch_position(position_chain + [{'type': 'list', 'index': i}])
                    if r != []:
                        position += r
        elif isinstance(self.value, Checked):
            r = self.value.get_unmatch_position(position_chain)
            if r != []:
                position += r
        else:
            if not self.is_match:
                position.append(position_chain + [self.reason])
        return position
def is_valid_match_num(item_to_match, num_matched):
    """Decide whether *num_matched* satisfies the qualifier *item_to_match*.

    Returns ``(is_valid, reason)``; *reason* is non-empty only on failure.
    """
    reason = ''
    if isinstance(item_to_match, OneOf):
        is_valid = (num_matched == 1)
        if not is_valid:
            reason = f"{item_to_match.__name__}: matched {num_matched}, expect 1"
    elif isinstance(item_to_match, SomeOf):
        is_valid = (0 < num_matched <= len(item_to_match.candidates))
        if not is_valid:
            reason = f"{item_to_match.__name__}: matched {num_matched}, expect > 0 and <= {len(item_to_match.candidates)}"
    elif isinstance(item_to_match, RepeatableSomeOf):
        is_valid = (num_matched > 0)
        if not is_valid:
            reason = f"{item_to_match.__name__}: matched {num_matched}, expect > 0"
    elif isinstance(item_to_match, Required):
        is_valid = (num_matched == len(item_to_match.candidates))
        if not is_valid:
            reason = f"{item_to_match.__name__}: matched {num_matched}, expect {len(item_to_match.candidates)}"
    elif isinstance(item_to_match, (Optional, Any)):
        # Optional / Any impose no count constraint.
        is_valid = True
    else:
        # Plain descriptor: exactly one match expected.
        is_valid = (num_matched == 1)
        if not is_valid:
            if isinstance(item_to_match, (String, Bool, Integer, Float)):
                reason = f"no match for {item_to_match.__name__}"
            else:
                reason = f"no match for {item_to_match}"
    return is_valid, reason
def find_key_matched(key, dst_keys):
    """Return the first rule key in *dst_keys* that *key* matches, else None.

    Mainly handles dict rules whose keys are generic matchers such as
    ``Any`` or ``String``.
    """
    return next((k for k in dst_keys if check(key, k).is_match), None)
def cal_num_valid(checked):
    """Count ``(matched, total)`` Checked nodes over a result tree.

    Non-``Checked`` inputs contribute ``(0, 0)``.
    """
    if not isinstance(checked, Checked):
        return 0, 0
    # Every Checked node counts itself, then recurses into its payload.
    valid, total = int(checked.is_match), 1
    inner = checked.value
    if isinstance(inner, Checked):
        sub_valid, sub_total = cal_num_valid(inner)
        valid += sub_valid
        total += sub_total
    elif isinstance(inner, dict):
        for k, v in inner.items():
            for node in (k, v):
                sub_valid, sub_total = cal_num_valid(node)
                valid += sub_valid
                total += sub_total
    elif isinstance(inner, list):
        for node in inner:
            sub_valid, sub_total = cal_num_valid(node)
            valid += sub_valid
            total += sub_total
    return valid, total
def check(config, rule, ori_config=None) -> Checked:
    """Recursively validate ``config`` against ``rule``.

    Returns a ``Checked`` tree mirroring the structure of ``config``; each node
    records whether it matched and a human-readable reason. ``ori_config``
    carries the original top-level config so that two-argument rule callbacks
    can inspect it.

    NOTE(review): dict rules are mutated in place (a normalized "__rule__"
    entry is cached on them) — confirm rule objects are not shared concurrently.
    """
    if ori_config is None:
        ori_config = config

    def _check_rules(rules, config, ori_config):
        # Apply user-supplied callback rules: each entry is a (callable,
        # description) pair; callables take (config) or (config, ori_config).
        for rule, desp in rules:
            try:
                num_vars = rule.__code__.co_argcount
                if num_vars == 1:
                    is_valid = rule(config)
                else:
                    is_valid = rule(config, ori_config)
                if not is_valid:
                    return False, desp
            except Exception:
                traceback.print_exc()
                return False, "Cannot apply rule"
        return True, 'Additional rules passed'

    # --- scalar type rules: check Python type first, then attached callbacks ---
    if isinstance(rule, String):
        flag = isinstance(config, str)
        flag2, reason = _check_rules(rule.rules, config, ori_config)
        if not flag or (flag and flag2):
            return Checked(config, flag, rule.__name__)
        else:
            return Checked(config, flag2, reason)
    if isinstance(rule, Bool):
        flag = isinstance(config, bool)
        flag2, reason = _check_rules(rule.rules, config, ori_config)
        if not flag or (flag and flag2):
            return Checked(config, flag, rule.__name__)
        else:
            return Checked(config, flag2, reason)
    if isinstance(rule, Integer):
        flag = isinstance(config, int)
        flag2, reason = _check_rules(rule.rules, config, ori_config)
        if not flag or (flag and flag2):
            return Checked(config, flag, rule.__name__)
        else:
            return Checked(config, flag2, reason)
    elif isinstance(rule, Float):
        # Integers are accepted wherever a float is expected.
        flag = isinstance(config, float) or isinstance(config, int)
        flag2, reason = _check_rules(rule.rules, config, ori_config)
        if not flag or (flag and flag2):
            return Checked(config, flag, rule.__name__)
        else:
            return Checked(config, flag2, reason)
    elif isinstance(rule, (OneOf, Required, Optional, SomeOf, RepeatableSomeOf)):
        # config is always a single element here
        if isinstance(rule, Optional):
            # Optional also accepts the absence of a value (None).
            res = [check(config, v, ori_config) for v in rule.candidates + (None,)]
        else:
            res = [check(config, v, ori_config) for v in rule.candidates]
        is_match = [i.is_match for i in res]
        num_valid = sum(is_match)
        if isinstance(rule, (OneOf, SomeOf, RepeatableSomeOf)):
            flag = True if num_valid == 1 else False
        elif isinstance(rule, Required):
            # Normally, Required only acts on dict keys
            flag = True if num_valid == 1 else False
        else:
            flag = True if config is None or num_valid == 1 else False
        flag2, reason = _check_rules(rule.rules, config, ori_config)
        if flag:
            if flag2:
                # Report the first matching candidate's result.
                pos = is_match.index(True)
                return Checked(res[pos], flag, reason=rule.__name__)
            else:
                return Checked(config, flag2, reason=reason)
        else:
            return Checked(config, flag, reason=rule.__name__)
    elif isinstance(rule, dict):
        if not isinstance(config, dict):
            return Checked(config, False, f"Type {type(config)} not match dict")
        # Normalize and cache the key cardinality spec under "__rule__".
        if rule.get("__rule__") is None:
            # rule["__rule__"] = list(rule.keys())
            # required_keys = [k for k in rule.keys() if isinstance(k, (str, int)) and k != "__rule__"]
            required_keys = [k for k in rule.keys() if isinstance(k, (Any, All)) or k != "__rule__"]
            if len(required_keys) > 0:
                # rule["__rule__"] = [Required(*required_keys)]
                rule["__rule__"] = required_keys
            else:
                rule["__rule__"] = []
        else:
            if not isinstance(rule.get("__rule__"), list):
                rule["__rule__"] = [rule["__rule__"]]
            # Keys not covered by an explicit cardinality rule become required,
            # unless an explicit Required rule is already present.
            required_flag = True
            non_required_keys = []
            for r in rule["__rule__"]:
                if isinstance(r, (OneOf, SomeOf, RepeatableSomeOf, Optional)):
                    for candidate in r.candidates:
                        if isinstance(candidate, (str, int)):
                            non_required_keys.append(candidate)
                elif isinstance(r, Required):
                    required_flag = False
                    break
            if required_flag:
                all_keys = [k for k in rule.keys() if isinstance(k, (str, int)) and k != "__rule__"]
                required_keys = list(set(all_keys) - set(non_required_keys))
                if len(required_keys) > 0:
                    # rule["__rule__"].append(Required(*required_keys))
                    rule["__rule__"] += required_keys
        # Matrix of Checked results: rows = config keys, cols = key rules.
        checked_matrix = np.array([
            [check(k, r, ori_config) for r in rule["__rule__"]] for k in config
        ])
        row_size = len(checked_matrix)
        if row_size > 0:
            col_size = len(checked_matrix[0])
            is_match_matrix = np.zeros_like(checked_matrix)
            for i in range(row_size):
                for j in range(col_size):
                    is_match_matrix[i][j] = checked_matrix[i, j].is_match
            # Per-rule count of how many config keys it matched.
            num_match_list = np.sum(is_match_matrix, axis=0)
        else:
            num_match_list = [0 for i in range(len(rule["__rule__"]))]
        is_valid_list = [is_valid_match_num(rule["__rule__"][i], num_match_list[i]) for i in range(len(rule["__rule__"]))]
        is_match = True
        reason = []
        for is_valid, r in is_valid_list:
            if not is_valid:
                is_match = False
                if r:
                    reason.append(r)
        # Every config key must be matched by at least one rule.
        for i in range(row_size):
            if np.sum(is_match_matrix[i]) == 0:
                is_match = False
                reason.append(f"{list(config.keys())[i]} match no rules")
        reason = ','.join(reason)
        # Recurse into values using the first rule that matched each key.
        result = {}
        for i, k in enumerate(list(config.keys())):
            if np.sum(is_match_matrix[i]) == 0:
                result[Checked(k, False, '')] = Checked(config[k], False, 'match no rules')
            else:
                for j, flag in enumerate(is_match_matrix[i]):
                    if flag:
                        result[checked_matrix[i][j]] = check(config[k], rule[find_key_matched(k, list(rule.keys()))], ori_config)
                        break
        return Checked(result, is_match, reason)
    elif isinstance(rule, list):
        if not isinstance(config, list):
            return Checked(config, False, f"Type {type(config)} not match list")
        # SomeOf and RepeatableSomeOf behave the same here
        if len(rule) == 1 and isinstance(rule[0], (OneOf, SomeOf, RepeatableSomeOf, Required, Optional, Any)):
            if isinstance(rule[0], Any):
                if len(config) != 1:
                    return Checked(config, False, f"List length {len(config)} != 1")
                res = check(config[0], rule[0], ori_config)
                return Checked([res], True, 'list')
            if isinstance(rule[0], All):
                return Checked(config, True, All.__name__)
            if isinstance(rule[0], Optional):
                if len(rule[0].candidates) != 1:
                    raise ValueError(f"Optional rule {rule} may not be well defined.")
                if len(config) == 0:
                    # Empty list satisfies an Optional list rule.
                    return Checked(config, True, Optional.__name__ + "_√")
                else:
                    rule_copy = [rule[0].candidates[0]]
                    res = check(config, rule_copy, ori_config)
                    if res.is_match:
                        res.reason = Optional.__name__ + "_√"
                    else:
                        res.reason = Optional.__name__ + "_×" + "," + res.reason
                    return res
            # OneOf / SomeOf / RepeatableSomeOf / Required over all list items.
            checked_list = [check(v, rule[0], ori_config) for v in config]
            is_valid_list = [v.is_match for v in checked_list]
            is_match, r = is_valid_match_num(rule[0], sum(is_valid_list))
            reason = []
            if r:
                reason.append(r)
            if isinstance(rule[0], SomeOf):
                # SomeOf forbids duplicate matched items.
                valid_config = []
                for i, v in enumerate(is_valid_list):
                    if v:
                        valid_config.append(config[i])
                if len(set(valid_config)) != len(valid_config):
                    is_match = False
                    reason.append("Repeated items for SomeOf")
            for i, v in enumerate(is_valid_list):
                if not v:
                    is_match = False
                    reason.append(f"{config[i]} match nothing")
            if is_match:
                reason.insert(0, rule[0].__name__ + "_√")
            else:
                reason.insert(0, rule[0].__name__ + "_×")
            reason = ','.join(reason)
            return Checked(checked_list, is_match, reason)
        else:
            # Positional list rule: lengths must agree, items checked pairwise.
            if len(config) != len(rule):
                return Checked(config, False, f"List length {len(config)} != {len(rule)}")
            res = [check(config[i], rule[i], ori_config) for i in range(len(rule))]
            is_match = [v.is_match for v in res]
            num_total = len(is_match)
            num_valid = sum(is_match)
            flag = (num_valid == num_total)
            return Checked(res, flag, f"{num_valid}/{num_total}")
    else:
        # Literal rule: plain equality, with Any/All honoring callback rules.
        if config == rule:
            if isinstance(rule, (Any, All)):
                is_match, reason = _check_rules(rule.rules, config, ori_config)
                if not is_match:
                    return Checked(config, False, reason)
                else:
                    return Checked(config, True, rule.__name__)
            else:
                return Checked(config, True, str(rule))
        else:
            if rule is None:
                return Checked(config, False, "no rule")
            else:
                return Checked(config, False, "not equal")
XFL | XFL-master/python/common/checker/compare.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .checker import check, cal_num_valid
def compare(config, rule):
    """Validate ``config`` against ``rule`` and summarize the outcome.

    Args:
        config: Configuration (dict/list/scalar) to validate.
        rule: Rule description understood by ``check``.

    Returns:
        tuple: ``(result, itemized_result, rule_passed, rule_checked)`` where
        ``result`` is the brief per-item report, ``itemized_result`` lists the
        positions that failed to match, and ``rule_passed``/``rule_checked``
        count the rules that passed vs. the rules that were evaluated.
    """
    # Removed: large swaths of commented-out reporting code and a debug print.
    checked = check(config, rule)
    rule_passed, rule_checked = cal_num_valid(checked)
    result = checked.breif_result()
    itemized_result = checked.get_unmatch_position()
    return result, itemized_result, rule_passed, rule_checked
| 1,481 | 32.681818 | 74 | py |
XFL | XFL-master/python/common/checker/__init__.py | 0 | 0 | 0 | py | |
XFL | XFL-master/python/common/utils/constants.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Supported encryption method identifiers.
PLAIN = "plain"
PAILLIER = "paillier"
CKKS = "ckks"
OTP = "otp"

# Loss function name constants (spelled the same as the torch.nn class names).
BCEWithLogitsLoss = "BCEWithLogitsLoss"
MSELoss = "MSELoss"
| 753 | 29.16 | 74 | py |
XFL | XFL-master/python/common/utils/fed_conf_parser.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from common.utils.logger import logger
class FedConfParser():
    """Parses a raw federation config dict into the normalized internal layout."""

    @classmethod
    def parse_dict_conf(cls, conf: dict, node_id: str = ''):
        """Normalize a federation config.

        Args:
            conf: Raw config containing ``fed_info`` (scheduler/trainer
                addresses as "host:port" strings, spaces tolerated),
                ``redis_server`` and optionally ``grpc`` / ``node_id``.
            node_id: Local node id; wins over a conflicting value in ``conf``.

        Returns:
            dict: ``{"node_id", "scheduler", "trainer", "redis_server"}`` with
            host/port split out and ``use_tls`` propagated from the grpc block.
        """
        if "node_id" not in conf or conf.get("node_id") == '':
            conf["node_id"] = node_id
        else:
            if node_id != conf["node_id"]:
                # Fixed: message previously lacked a space before "in" and the
                # input node_id was not actually applied despite the promise.
                logger.warning(f"The input node_id {node_id} and node_id {conf['node_id']} in fed_conf.json not the same, use input node_id.")
                conf["node_id"] = node_id

        out_conf = {}
        out_conf["node_id"] = conf["node_id"]

        grpc_conf = conf.get("grpc")
        if grpc_conf is None:
            use_tls = False
        else:
            use_tls = grpc_conf.get("use_tls") or False

        fed_info = conf.get("fed_info")
        scheduler_conf = fed_info["scheduler"]
        scheduler_node_id = list(scheduler_conf.keys())[0]
        scheduler_host, scheduler_port = scheduler_conf[scheduler_node_id].replace(" ", "").split(":")

        out_conf["scheduler"] = {
            "node_id": scheduler_node_id,
            "host": scheduler_host,
            "port": scheduler_port,
            "use_tls": use_tls
        }

        out_conf["trainer"] = {}

        if "assist_trainer" in fed_info:
            node_id = list(fed_info["assist_trainer"].keys())[0]
            host, port = fed_info["assist_trainer"][node_id].replace(" ", "").split(":")
            out_conf["trainer"]["assist_trainer"] = {
                "node_id": node_id,
                "host": host,
                "port": port,
                "use_tls": use_tls
            }

        for node_id, host_port in fed_info["trainer"].items():
            host, port = host_port.replace(" ", "").split(":")
            out_conf["trainer"][node_id] = {
                "host": host,
                "port": port,
                "use_tls": use_tls
            }

        host, port = conf["redis_server"].replace(" ", "").split(":")
        out_conf["redis_server"] = {
            "host": host,
            "port": port
        }
        return out_conf
| 2,706 | 35.093333 | 141 | py |
XFL | XFL-master/python/common/utils/grpc_channel_options.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Common gRPC channel options: 100 MiB send/receive message caps plus a
# transparent retry policy for transient UNAVAILABLE errors.
# Fixed: "backoffMutiplier" was misspelled; gRPC expects "backoffMultiplier".
# NOTE(review): gRPC service configs normally nest "retryPolicy" under
# "methodConfig" — verify this top-level form is actually honored.
_common_options = [
    ('grpc.max_send_message_length', 100 * 1024 * 1024),
    ('grpc.max_receive_message_length', 100 * 1024 * 1024),
    ('grpc.enable_retries', 1),
    ('grpc.service_config',
     '{"retryPolicy":{ "maxAttempts": 4, "initialBackoff": "0.01s", "maxBackoff": "0.01s", "backoffMultiplier": 1, "retryableStatusCodes": ["UNAVAILABLE"]}}')
]

# Secure and insecure channels currently share identical options; keep two
# independent list objects so callers can tweak one without affecting the other.
secure_options = list(_common_options)

insecure_options = list(_common_options)
| 1,316 | 42.9 | 180 | py |
XFL | XFL-master/python/common/utils/utils.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from pathlib import Path
from typing import List
import time
from functools import wraps
def save_model_config(stage_model_config: List[dict], save_path: Path) -> None:
    """Append one stage's model config to ``model_config.json`` under ``save_path``.

    Args:
        stage_model_config: Non-empty list of per-stage config dicts.
        save_path: Directory for the JSON file; created if missing.

    Raises:
        TypeError: If ``stage_model_config`` is empty.
    """
    full_path = os.path.join(save_path, "model_config.json")
    if len(stage_model_config) == 0:
        raise TypeError("Length of stage_model_config should larger than 0.")
    if not os.path.exists(save_path):
        Path(save_path).mkdir(parents=True, exist_ok=True)
    if os.path.exists(full_path):
        # Merge into the existing stage list.
        with open(full_path, "r") as rf:
            stages = json.load(rf)
        stages.extend(stage_model_config)
        with open(full_path, "w") as wf:
            json.dump(stages, fp=wf)
    else:
        # First stage: create the file.
        with open(full_path, "w") as wf:
            json.dump(stage_model_config, fp=wf)
def func_timer(func):
    """Decorator that prints when *func* is called and how long the call took."""
    @wraps(func)
    def timed(*args, **kwargs):
        started = time.time()
        print(func.__name__ + " was called")
        result = func(*args, **kwargs)
        print(f"{func.__name__} cost {time.time()-started}s")
        return result
    return timed
def update_dict(a: dict, b: dict):
    """Recursively merge ``b`` into ``a`` in place.

    Nested dicts are merged; any other value in ``b`` overwrites the one in
    ``a``. Non-dict inputs are ignored.
    """
    if not (isinstance(a, dict) and isinstance(b, dict)):
        return
    for key, new_value in b.items():
        if key in a and isinstance(a[key], dict) and isinstance(new_value, dict):
            update_dict(a[key], new_value)
        else:
            a[key] = new_value
| 2,320 | 30.794521 | 79 | py |
XFL | XFL-master/python/common/utils/data_utils.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gzip
import hashlib
import os
import pathlib
import ssl
import tarfile
import zipfile
from typing import Optional
from urllib import request
from sklearn.utils import shuffle as sk_shuffle
def cal_md5(fpath: str, chunk_size: int = 1024 * 1024) -> str:
    """Compute the hex MD5 digest of a file, reading it in chunks."""
    digest = hashlib.md5()
    with open(fpath, "rb") as fh:
        while True:
            block = fh.read(chunk_size)
            if not block:
                break
            digest.update(block)
    return digest.hexdigest()
def check_integrity(fpath: str, md5: Optional[str] = None) -> bool:
    """Return True if ``fpath`` is a file and (when ``md5`` is given) matches it."""
    if not os.path.isfile(fpath):
        return False
    if md5 is None:
        # Existence alone counts when no checksum is expected.
        return True
    return cal_md5(fpath) == md5
def download_url(url: str, fpath: str, md5: str, chunk_size: int = 1024 * 32) -> None:
    """Download ``url`` to ``fpath`` unless a file with matching MD5 already exists.

    Args:
        url: Source URL.
        fpath: Destination file path.
        md5: Expected MD5 hex digest used to skip an already-verified download.
        chunk_size: Bytes read from the response per iteration.
    """
    if check_integrity(fpath, md5):
        print("Verified dataset Already exists")
        return
    print("Dataset downloading...")
    # NOTE(review): certificate verification is disabled here — acceptable only
    # for trusted dataset mirrors.
    with request.urlopen(request.Request(url), context=ssl._create_unverified_context()) as response:
        with open(fpath, "wb") as fh:
            # iter() with the b"" sentinel stops at EOF, so chunks are never
            # empty; the old `if not chunk: continue` guard was dead code, and
            # the explicit fh.close() inside the `with` block was redundant.
            for chunk in iter(lambda: response.read(chunk_size), b""):
                fh.write(chunk)
def extract_file_recursively(from_path: str, to_path: str) -> None:
    """Extract an archive, peeling nested suffixes (e.g. ``.tar.gz``) one by one.

    Each intermediate archive is deleted after extraction. Files with a single
    non-archive suffix are left untouched.
    """
    def extract(from_path, to_path, suffix):
        # Dispatch on the outermost suffix.
        if suffix == ".tar":
            with tarfile.open(from_path, "r") as tar:
                # NOTE(review): extractall() trusts member paths; a malicious
                # archive could escape to_path (path traversal). Consider
                # validating members before extraction.
                tar.extractall(to_path)
        elif suffix == ".gz":
            with gzip.open(from_path, "rb") as rfh, open(to_path, "wb") as wfh:
                wfh.write(rfh.read())
        elif suffix == ".zip":
            with zipfile.ZipFile(from_path, "r") as f:
                for file in f.namelist():
                    f.extract(file, to_path)

    suffixes = pathlib.Path(from_path).suffixes
    suffix = suffixes[-1]
    if len(suffixes) == 1:
        # Last layer: extract straight into to_path (no-op for non-archives).
        if suffix not in [".gz", ".tar", ".zip"]:
            return
        extract(from_path, to_path, suffix)
        os.remove(from_path)
        return
    else:
        # Strip one suffix layer (e.g. foo.tar.gz -> foo.tar) and recurse.
        _to_path = pathlib.Path(from_path).parent.joinpath(pathlib.Path(from_path).stem)
        extract(from_path, _to_path, suffix)
        os.remove(from_path)
        from_path = _to_path
    extract_file_recursively(from_path, to_path)
def download_and_extract_data(url: str, md5: str, data_path: str, data_folder: Optional[str] = None, to_path: Optional[str] = None) -> None:
    """Download an archive (verified by MD5) and extract it next to ``data_path``.

    When ``data_folder`` already exists non-empty under the target directory,
    the download is skipped entirely.
    """
    if not to_path:
        to_path = pathlib.Path(data_path).parent
    if data_folder:
        destination = os.path.join(to_path, data_folder)
        if os.path.exists(destination) and os.path.getsize(destination) > 0:
            print("Dataset has already existed")
            return
    download_url(url, data_path, md5)
    extract_file_recursively(data_path, to_path)
    print("Data finished downloading and extraction")
def pd_train_test_split(df, test_ratio: float, shuffle: bool = False, random_state: int = None):
    """Split ``df`` into (train, test): the first ``test_ratio`` share is test.

    Optionally shuffles first (sklearn shuffle, seeded by ``random_state``).
    Both frames get a fresh 0-based index.
    """
    if shuffle:
        df = sk_shuffle(df, random_state=random_state)
    cut = int(len(df) * test_ratio)
    train_df = df[cut:].reset_index(drop=True)
    test_df = df[:cut].reset_index(drop=True)
    return train_df, test_df
| 3,785 | 33.108108 | 140 | py |
XFL | XFL-master/python/common/utils/logger.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging.config
import os
from logging import FileHandler, LogRecord
# Root directory under which all per-job log files are written.
LOG_PATH = "/opt/log"
class ColorFormatter(logging.Formatter):
    """Formatter that wraps each record in an ANSI color escape by level."""

    log_colors = {
        'CRITICAL': '\033[0;31m',
        'ERROR': '\033[0;33m',
        'WARNING': '\033[0;35m',
        'INFO': '\033[0;32m',
        'DEBUG': '\033[0;00m',
    }

    def format(self, record: LogRecord) -> str:
        """Format the record, coloring known level names; others pass through."""
        text = super().format(record)
        color = self.log_colors.get(record.levelname)
        if color is None:
            return text
        return color + text + '\033[0m'
# Shared project-wide logger; the add_*_handler helpers below attach per-job
# file handlers to it at runtime.
logger = logging.getLogger("root")
logger.setLevel(logging.INFO)
# logger.setLevel(logging.DEBUG)

# Plain format for file handlers; colored variant for the console.
formatter = logging.Formatter("%(asctime)s %(levelname)s: %(message)s")
color_formatter = ColorFormatter("%(asctime)s %(levelname)s: %(message)s")

# Console output is always enabled.
streamHandler = logging.StreamHandler()
streamHandler.setFormatter(color_formatter)
logger.addHandler(streamHandler)
def get_node_log_path(job_id: str, node_ids: list[str]):
    """Map each node id to its per-job ``xfl.log`` path under ``LOG_PATH``."""
    return {
        node_id: "{}/{}/{}/xfl.log".format(LOG_PATH, job_id, node_id)
        for node_id in node_ids
    }
def get_stage_node_log_path(job_id: str, train_conf: dict):
    """Build ``{stage_id: {node_id: stage log path}}`` from a train config.

    Nodes whose ``model_info.name`` is the empty string are skipped.
    """
    stages_log_path = {}
    for stage_id, node_conf in train_conf.items():
        stage_paths = {}
        for node_id, conf in node_conf.items():
            model_name = conf.get('model_info', {}).get('name', '')
            if model_name == '':
                continue
            stage_paths[node_id] = "{}/{}/{}/stage{}_{}.log".format(
                LOG_PATH, job_id, node_id, stage_id, model_name)
        stages_log_path[stage_id] = stage_paths
    return stages_log_path
def add_job_log_handler(job_id: str, node_id: str) -> object:
    """Attach and return a per-job FileHandler writing to <LOG_PATH>/<job>/<node>/xfl.log."""
    log_dir = "{}/{}/{}".format(LOG_PATH, job_id, node_id)
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)
    job_handler = FileHandler(log_dir + "/xfl.log")
    job_handler.setFormatter(formatter)
    logger.addHandler(job_handler)
    return job_handler
def add_job_stage_log_handler(job_id: str, node_id: str, stage_id: int, model_name: str) -> object:
    """Attach and return a per-stage FileHandler; returns None when model_name is empty."""
    if model_name == '':
        return None
    log_dir = "{}/{}/{}".format(LOG_PATH, job_id, node_id)
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)
    stage_handler = FileHandler("{}/stage{}_{}.log".format(log_dir, stage_id, model_name))
    stage_handler.setFormatter(formatter)
    logger.addHandler(stage_handler)
    return stage_handler
def remove_log_handler(handler):
    """Detach *handler* (returned by one of the add_* helpers) from the shared logger."""
    logger.removeHandler(handler)
| 3,285 | 33.229167 | 114 | py |
XFL | XFL-master/python/common/utils/tree_transfer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict
from algorithm.core.tree.tree_structure import Node, Tree
from common.utils.tree_pickle_structure import NodePickle, TreePickle
def label_trainer_tree_transfer(tree: Tree) -> TreePickle:
    """Transfer label trainer tree structure to pickle layout.

    Args:
        tree: Tree.

    Returns: TreePickle
    """
    def _to_pickle(node: Node) -> NodePickle:
        # Single construction path replaces the previous duplicated 12-field
        # conditional expression; nodes without split_info (e.g. leaves) get
        # None for every split-related field, exactly as before.
        split = node.split_info
        return NodePickle(
            id=node.id,
            depth=node.depth,
            parent_node_id=node.parent_node_id,
            left_node_id=node.left_node_id,
            right_node_id=node.right_node_id,
            is_leaf=node.is_leaf,
            weight=node.weight,
            linkage=node.linkage,
            split_point=split.split_point if split else None,
            feature_idx=split.feature_idx if split else None,
            missing_value_on_left=split.missing_value_on_left if split else None,
            owner_id=split.owner_id if split else None,
        )

    nodes_pickle = {k: _to_pickle(node) for k, node in tree.nodes.items()}
    return TreePickle(party_id=tree.party_id, nodes=nodes_pickle,
                      root_node_id=tree.root_node_id,
                      root_node=nodes_pickle[tree.root_node_id])
def trainer_tree_transfer(nodes: Dict[str, Node]) -> Dict[str, NodePickle]:
    """Transfer trainer nodes structure to pickle layout.

    Args:
        nodes: Mapping of node id to Node.

    Returns: Mapping of node id to NodePickle.
    """
    pickled = {}
    for node_id, node in nodes.items():
        # Assumes split_info is present on every trainer node (unlike the
        # label-trainer variant) — TODO confirm with callers.
        split = node.split_info
        pickled[node_id] = NodePickle(
            id=node.id, depth=node.depth, parent_node_id=node.parent_node_id,
            left_node_id=node.left_node_id, right_node_id=node.right_node_id,
            is_leaf=node.is_leaf, weight=node.weight, linkage=node.linkage,
            split_point=split.split_point, feature_idx=split.feature_idx,
            missing_value_on_left=split.missing_value_on_left,
            owner_id=split.owner_id)
    return pickled
| 3,234 | 46.573529 | 119 | py |
XFL | XFL-master/python/common/utils/model_io.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import json
from typing import Optional
from pathlib import Path
import torch
from common.utils.logger import logger
class ModelIO:
    """Static helpers for persisting and loading model artifacts.

    Supports torch checkpoint dicts, ONNX exports and plain JSON models; file
    names optionally carry an ``_epoch_<n>`` suffix before the extension.
    """

    @staticmethod
    def _gen_model_path(
        save_dir: str,
        model_name: str,
        epoch: Optional[int] = None,
    ) -> Path:
        """Build the target path, injecting ``_epoch_<epoch>`` before the extension.

        Creates ``save_dir`` if it does not exist yet.
        NOTE(review): a ``model_name`` without any '.' yields a leading-dot
        file name (e.g. ``.model``) — confirm callers always pass an extension.
        """
        split_name = model_name.split(".")
        if epoch is None:
            model_name = '.'.join(split_name[:-1]) + '.' + split_name[-1]
        else:
            model_name = '.'.join(split_name[:-1]) + f'_epoch_{epoch}.' + split_name[-1]
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        model_path = Path(save_dir, model_name)
        return model_path

    @staticmethod
    def save_torch_model(state_dict,
                         save_dir: str,
                         model_name: str,
                         meta_dict: dict = {},
                         epoch: Optional[int] = None,
                         version: str = '1.4.0'):
        """Save a torch state_dict plus metadata under ``save_dir``.

        Bug fix: ``meta_dict`` entries were previously discarded because the
        accumulator dict was reassigned instead of updated.
        """
        model_dict = {}
        model_dict.update(meta_dict)
        model_dict.update({"state_dict": state_dict, "version": version})
        model_path = ModelIO._gen_model_path(save_dir, model_name, epoch)
        torch.save(model_dict, model_path)
        logger.info("Model saved as: {}".format(model_path))

    @staticmethod
    def copy_best_model(
        save_dir: str,
        model_name: str,
        epoch: Optional[int] = None
    ):
        """Copy the epoch-tagged checkpoint over the un-tagged "best" file name."""
        model_path = ModelIO._gen_model_path(save_dir, model_name, epoch)
        best_model_path = ModelIO._gen_model_path(save_dir, model_name)
        shutil.copy(model_path, best_model_path)
        logger.info("Best model saved as: {}".format(best_model_path))

    @staticmethod
    def load_torch_model(model_path: str, device: str = "cpu"):
        """Load a torch checkpoint onto ``device`` ("cpu" or a cuda device)."""
        if device == "cpu":
            model_dict = torch.load(model_path, map_location=lambda storage, loc: storage)
        elif "cuda" in device:
            # All tensors are mapped onto GPU 0 regardless of the cuda index.
            model_dict = torch.load(model_path, map_location=lambda storage, loc: storage.cuda(0))
        else:
            raise ValueError(f"Device {device} not support.")
        logger.info("Pretrain model loaded from: {}".format(model_path))
        return model_dict

    @staticmethod
    def save_torch_onnx(model, input_dim: tuple, save_dir: str, model_name: str, epoch: Optional[int] = None):
        """Export ``model`` to ONNX using a random batch-of-1 dummy input of shape ``input_dim``."""
        dummy_input = torch.randn(1, *input_dim)
        model_path = ModelIO._gen_model_path(save_dir, model_name, epoch)
        torch.onnx.export(model,
                          dummy_input,
                          model_path,
                          verbose=False,
                          input_names=['input'],
                          output_names=['output'],
                          dynamic_axes={'input': {0: 'batch_size'},
                                        'output': {0: 'batch_size'}})
        logger.info("Model saved as: {}".format(model_path))

    @staticmethod
    def save_json_model(model_dict: dict,
                        save_dir: str,
                        model_name: str,
                        meta_dict: dict = {},
                        epoch: Optional[int] = None,
                        version: str = '1.4.0'):
        """Save a JSON model: meta entries first, then model entries, then version.

        Bug fix: the output file handle was previously never closed; a context
        manager now guarantees it.
        """
        new_model_dict = {}
        new_model_dict.update(meta_dict)
        new_model_dict.update(model_dict)
        new_model_dict["version"] = version
        model_path = ModelIO._gen_model_path(save_dir, model_name, epoch)
        with open(model_path, 'w') as fp:
            json.dump(new_model_dict, fp)
        logger.info("Model saved as: {}".format(model_path))

    @staticmethod
    def load_json_model(model_path: str):
        """Load and return a JSON model dict from ``model_path``."""
        with open(model_path, 'r') as fp:
            model_dict = json.load(fp)
        logger.info("Model loaded from: {}".format(model_path))
        return model_dict

    @staticmethod
    def save_json_proto(model_dict: dict,
                        save_dir: str,
                        model_name: str,
                        meta_dict: dict = {},
                        epoch: Optional[int] = None,
                        version: str = '1.4.0'):
        """Placeholder for protobuf-backed JSON saving; not implemented yet."""
        pass

    @staticmethod
    def load_json_proto(model_path: str):
        """Placeholder for protobuf-backed JSON loading; not implemented yet."""
        pass
| 4,851 | 35.757576 | 110 | py |
XFL | XFL-master/python/common/utils/config.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
def load_json_config(file):
    """Read *file* and return its parsed JSON content."""
    with open(file) as fh:
        return json.load(fh)
def get_str_config(s):
    """Parse a JSON string into the corresponding Python object."""
    return json.loads(s)
def parse_config(s):
    """Parse a deployment JSON string into a ``{scheduler, trainer}`` layout.

    Each node's endpoints are classified by substring of ``fuwuEndpointId``;
    ``grpcs://`` URLs are flagged ``use_tls`` and the scheme is stripped.
    """
    raw = json.loads(s)
    config = {
        "scheduler": {},
        "trainer": {}
    }
    for node_id, node in raw["nodes"].items():
        for endpoint in node["endpoints"]:
            url = endpoint["url"]
            use_tls = "grpcs://" in url
            url = url.replace("grpcs://" if use_tls else "grpc://", "")
            host = url.split(":")[0]
            port = url.split(":")[1]
            endpoint_id = endpoint["fuwuEndpointId"]
            # Order matters: "assist-trainer" contains "trainer".
            if "scheduler" in endpoint_id:
                config["scheduler"]["node_id"] = node_id
                config["scheduler"]["host"] = host
                config["scheduler"]["port"] = port
                config["scheduler"]["use_tls"] = use_tls
                config["scheduler"]["name"] = node["name"]
            elif "assist-trainer" in endpoint_id:
                config["trainer"]["assist_trainer"] = {
                    "host": host,
                    "port": port,
                    "use_tls": use_tls,
                    "name": node["name"],
                }
            elif "trainer" in endpoint_id:
                config["trainer"][node_id] = {
                    "host": host,
                    "port": port,
                    "use_tls": use_tls,
                    "name": node["name"],
                }
    return config
def refill_config(custom_conf: dict, default_conf: dict):
    """fill custom_conf by default_conf if a key is missing in custom_conf iteratively"""
    for key, default_value in default_conf.items():
        if key not in custom_conf:
            custom_conf[key] = default_value
        elif isinstance(default_value, dict):
            # Recurse so nested defaults are merged key by key.
            custom_conf[key] = refill_config(custom_conf[key], default_value)
    return custom_conf
| 2,888 | 37.013158 | 96 | py |
XFL | XFL-master/python/common/utils/algo_utils.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Tuple, Union
import numpy as np
import torch
from numpy.core.records import ndarray
from sklearn.metrics import auc, roc_curve
from torch.nn import Module
from common.utils.logger import logger
class MapeLoss(Module):
def __init__(self):
super(MapeLoss, self).__init__()
def forward(self, preds: torch.Tensor, labels: torch.Tensor) -> torch.Tensor:
"""
Args:
preds:
labels:
Returns:
"""
mask = (labels != 0)
distance = torch.abs(preds - labels) / torch.abs(labels)
return torch.mean(distance[mask])
class BiClsAccuracy(Module):
    """Binary classification accuracy computed from a 2x2 confusion matrix."""

    def __init__(self):
        super(BiClsAccuracy, self).__init__()

    def forward(self, confusion_matrix: np.array) -> torch.Tensor:
        """Return (TN + TP) / total from a sklearn-style confusion matrix."""
        tn, fp, fn, tp = confusion_matrix.ravel()
        correct = tn + tp
        total = tn + fp + fn + tp
        return correct / total
class BiClsPrecision(Module):
    """Binary classification precision from a 2x2 confusion matrix."""

    def __init__(self):
        super(BiClsPrecision, self).__init__()

    def forward(self, confusion_matrix: np.array) -> torch.Tensor:
        """Return TP / (TP + FP); a zero tensor when nothing was predicted positive.

        NOTE(review): the normal path returns a numpy scalar while the
        degenerate path returns a torch.Tensor — confirm callers accept both.
        """
        tn, fp, fn, tp = confusion_matrix.ravel()
        predicted_pos = fp + tp
        if predicted_pos > 0:
            return tp / predicted_pos
        return torch.Tensor([0.0])
class BiClsRecall(Module):
    """Binary-classification recall computed from a 2x2 confusion matrix."""

    def __init__(self):
        super(BiClsRecall, self).__init__()

    def forward(self, confusion_matrix: np.array) -> torch.Tensor:
        """Return TP / (TP + FN), or a zero tensor when no positives exist.

        Args:
            confusion_matrix: 2x2 matrix laid out as [[TN, FP], [FN, TP]].
        """
        tn, fp, fn, tp = confusion_matrix.ravel()
        actual_positive = fn + tp
        if actual_positive <= 0:
            # No actual positives in the data: recall is defined as 0 here.
            return torch.Tensor([0.0])
        return tp / actual_positive
class BiClsF1(Module):
    """Binary-classification F1 score computed from a 2x2 confusion matrix."""

    def __init__(self):
        super(BiClsF1, self).__init__()

    def forward(self, confusion_matrix: np.array) -> torch.Tensor:
        """Return the harmonic mean of precision and recall.

        Falls back to a zero tensor when either precision or recall is
        undefined (empty denominator).

        Args:
            confusion_matrix: 2x2 matrix laid out as [[TN, FP], [FN, TP]].
        """
        tn, fp, fn, tp = confusion_matrix.ravel()
        if fp + tp <= 0 or fn + tp <= 0:
            return torch.Tensor([0.0])
        precision = tp / (fp + tp)
        recall = tp / (fn + tp)
        return 2 * precision * recall / (precision + recall)
class BiClsAuc(Module):
    """Binary-classification AUC computed from precomputed ROC points."""
    def __init__(self):
        super(BiClsAuc, self).__init__()
    def forward(self, tpr: np.array, fpr: np.array) -> float:
        """Return the area under the ROC curve (sklearn trapezoidal rule).

        Args:
            tpr: TP / (TP + FN) at each threshold.
            fpr: FP / (FP + TN) at each threshold.
        Returns: auc_score
        """
        # `auc` is sklearn.metrics.auc, imported at module level.
        auc_score = auc(fpr, tpr)
        return auc_score
class BiClsKS(Module):
    """Kolmogorov-Smirnov statistic computed from precomputed ROC points."""

    def __init__(self):
        super(BiClsKS, self).__init__()

    def forward(self, tpr: np.array, fpr: np.array) -> float:
        """Return the KS statistic: the largest TPR-FPR gap, floored at 0.

        Args:
            tpr: TP / (TP + FN) at each threshold.
            fpr: FP / (FP + TN) at each threshold.
        """
        gaps = tpr - fpr
        return max(np.max(gaps), 0)
class aucScore(Module):
    """Computes AUC and the KS statistic directly from raw scores and labels."""
    def __init__(self):
        super(aucScore, self).__init__()
    def forward(self, pred: np.array, label: np.array) -> Tuple[float, Union[ndarray, int, float, complex]]:
        """Return ``(auc_score, ks)`` derived from the ROC curve of pred vs label.

        Args:
            pred: predicted scores.
            label: ground-truth binary labels.
        Returns: auc_score, ks
        """
        # roc_curve/auc are sklearn.metrics functions imported at module level.
        fpr, tpr, _ = roc_curve(label, pred)
        auc_score = auc(fpr, tpr)
        # KS = largest TPR-FPR gap, floored at 0.
        ks = max(np.max(tpr - fpr), 0)
        return auc_score, ks
class earlyStopping:
    """Early stops the training if validation loss doesn't improve after a given patience."""

    def __init__(self, key: str, patience: int = 10, delta: float = 0):
        """
        Args:
            key (str): metric key to monitor.
            patience (int): how many non-improving calls to tolerate before
                flagging early stop. Default: 10.
            delta (float): minimum change in the monitored quantity to qualify
                as an improvement. Default: 0.
        """
        self.key = key
        self.patience = patience
        self.delta = delta
        self.counter = 0
        self.best_score = None
        self.early_stop = False

    def __call__(self, metric) -> Tuple[bool, bool]:
        """Update state with the latest metrics; return (early_stop, save_flag)."""
        if self.key not in metric:
            raise KeyError(f"Key {self.key} cannot found in metrics.")
        val_score = metric[self.key]
        improved = (self.best_score is None
                    or val_score >= self.best_score + self.delta)
        if improved:
            self.best_score = val_score
            self.counter = 0
            return self.early_stop, True
        # No improvement: bump the patience counter and maybe stop.
        self.counter += 1
        logger.info(
            f'EarlyStopping counter: {self.counter} out of {self.patience}. Epoch score {val_score}, '
            f'best score {self.best_score}.')
        if self.counter >= self.patience:
            self.early_stop = True
        return self.early_stop, False
class _earlyStopping:
"""Early stops the training if validation metric doesn't increase or decrease after a given patience."""
def __init__(self, key: str, patience: int = 10, delta: float = 0, maxmize: bool = True):
"""
Args:
key (str): The key of metric to monitor.
patience (int): How long to wait after last time validation loss improved.
Default: 10
delta (float): Minimum change in the monitored quantity to qualify as an improvement.
Default: 0
maxmize (bool): If True, we try to maxmize the metric. Otherwise, we try to minimize the metric.
"""
self.patience = patience
self.key = key
self.counter = 0
self.best_score = None
self.best_epoch = None
self.early_stop = False
self.maxmize = 1 if maxmize else -1
self.delta = delta * maxmize
def __call__(self, metric: dict, epoch: int) -> bool:
'''
Args:
metric (dict): The metric dict.
epoch (int): The current epoch.
'''
if self.key not in metric:
raise KeyError("Key {} cannot found in metrics.".format(self.key))
val_score = metric[self.key]
if self.best_score is None:
# update best score and best epoch
self.best_score = val_score
self.best_epoch = epoch
elif (val_score * self.maxmize) < ((self.best_score + self.delta) * self.maxmize):
self.counter += 1
logger.info(
f'EarlyStopping counter: {self.counter} out of {self.patience}. '
f'Epoch {epoch} score {val_score}, '
f'best epoch {self.best_epoch} best score {self.best_score}.')
if (val_score * self.maxmize) < (self.best_score * self.maxmize):
# update best score and best epoch
self.best_score = val_score
self.best_epoch = epoch
if self.counter >= self.patience:
self.early_stop = True
else:
self.best_score = val_score
self.best_epoch = epoch
self.counter = 0
return self.early_stop
class earlyStoppingH(_earlyStopping):
    """Early stops the training if validation metric doesn't increase after a given patience."""

    def __init__(self, key: str, patience: int = 10, delta: float = 0):
        """
        Args:
            key (str): metric key to monitor; the optimization direction is
                inferred from the key name.
            patience (int): how long to wait after the last improvement.
                Default: 10.
            delta (float): minimum change that qualifies as an improvement.
                Default: 0.
        """
        # Classification-style metrics are maximized; error metrics minimized.
        if key in ("acc", "precision", "recall", "f1_score", "auc", "ks"):
            maxmize = True
        elif key in ("mae", "mse", "mape", "rmse"):
            maxmize = False
        else:
            raise ValueError("Key {} cannot be monitored.".format(key))
        super().__init__(key, patience, delta, maxmize=maxmize)
XFL | XFL-master/python/common/utils/config_parser.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# from service.fed_job import FedJob
# from service.fed_node import FedNode
def replace_variable(input, stage_id: int, job_id: str, node_id: str):
    """Recursively substitute [STAGE_ID], [JOB_ID] and [NODE_ID] placeholders.

    Strings may also contain arithmetic placeholders of the form
    ``[STAGE_ID-k]`` (k a non-negative integer), which are replaced by
    ``str(stage_id - k)``. Dicts and lists are traversed recursively; any
    other type is returned unchanged.

    Args:
        input: config fragment (str / dict / list / anything).
        stage_id: current stage index.
        job_id: current job id.
        node_id: current node id.

    Returns:
        The fragment with all placeholders substituted.
    """
    if isinstance(input, dict):
        return {k: replace_variable(v, stage_id, job_id, node_id) for k, v in input.items()}
    if isinstance(input, list):
        return [replace_variable(v, stage_id, job_id, node_id) for v in input]
    if isinstance(input, str):
        import re  # local import: avoids touching this module's import block

        input = (input.replace("[STAGE_ID]", str(stage_id))
                      .replace("[JOB_ID]", str(job_id))
                      .replace("[NODE_ID]", str(node_id)))
        if "STAGE_ID" in input:
            # Bug fix: the previous character-scanning implementation mutated
            # the `stage_id` parameter and kept using indices computed on the
            # pre-substitution string, which corrupted strings containing more
            # than one arithmetic placeholder (and could crash on non-numeric
            # bracket content). A single regex pass handles every occurrence
            # independently and leaves non-matching brackets untouched.
            input = re.sub(
                r"\[STAGE_ID-(\d+)\]",
                lambda m: str(stage_id - int(m.group(1))),
                input,
            )
        return input
    return input
class TrainConfigParser(object):
    """Read-only view over a raw train-stage config dict.

    Pulls the commonly used fields out of the nested structure and exposes
    them as attributes, applying the project defaults for missing keys.
    """

    def __init__(self, config: dict) -> None:
        self.train_conf = config
        self.inference = config.get("inference", False)
        self.identity = config.get("identity")
        self.fed_config = config.get("fed_info")
        self.model_info = config.get("model_info")
        self.extra_info = config.get("extra_info")
        self.computing_engine = config.get("computing_engine", "local")

        train_info = config.get("train_info", {})
        self.train_info = train_info
        self.device = train_info.get("device", "cpu")
        # Legacy configs use "params"; newer ones use "train_params".
        self.train_params = train_info.get("params") or train_info.get("train_params")

        interaction = train_info.get("interaction_params", {})
        self.interaction_params = interaction
        self.save_frequency = interaction.get("save_frequency", -1)
        self.write_training_prediction = interaction.get("write_training_prediction", False)
        self.write_validation_prediction = interaction.get("write_validation_prediction", False)

        data_input = config.get("input", {})
        self.input = data_input
        self.input_trainset = data_input.get("trainset", [])
        self.input_valset = data_input.get("valset", [])
        self.input_testset = data_input.get("testset", [])
        self.output = config.get("output")
class CommonConfigParser:
    """Extracts the commonly used fields from a raw operator config dict.

    Every attribute falls back to the project default when the corresponding
    key is absent from the nested config structure.
    """

    def __init__(self, config: dict) -> None:
        self.config = config
        self.identity = config.get("identity")

        model_info = config.get("model_info", {})
        self.model_info = model_info
        self.model_conf = model_info.get("config", {})

        data_input = config.get("input", {})
        self.input = data_input
        self.input_trainset = data_input.get("trainset", [])
        self.input_valset = data_input.get("valset", [])
        self.input_testset = data_input.get("testset", [])
        self.pretrain_model = data_input.get("pretrain_model", {})
        self.pretrain_model_path = self.pretrain_model.get("path", "")
        self.pretrain_model_name = self.pretrain_model.get("name", "")

        output = config.get("output", {})
        self.output = output
        self.output_dir = output.get("path", "")
        self.output_model_name = output.get("model", {}).get("name", "")
        self.output_onnx_model_name = output.get("onnx_model", {}).get("name", "")

        train_info = config.get("train_info", {})
        self.train_info = train_info
        self.device = train_info.get("device", "cpu")

        interaction = train_info.get("interaction_params", {})
        self.interaction_params = interaction
        self.save_frequency = interaction.get("save_frequency", -1)
        self.echo_training_metrics = interaction.get("echo_training_metrics", False)
        self.write_training_prediction = interaction.get("write_training_prediction", False)
        self.write_validation_prediction = interaction.get("write_validation_prediction", False)

        train_params = train_info.get("train_params", {})
        self.train_params = train_params
        self.aggregation = train_params.get("aggregation", {})
        # Default to plain (no) encryption when nothing is configured.
        self.encryption = train_params.get("encryption", {"plain": {}})
        self.optimizer = train_params.get("optimizer", {})
        self.lr_scheduler = train_params.get("lr_scheduler", {})
        self.lossfunc = train_params.get("lossfunc", {})
        self.metric = train_params.get("metric", {})
        self.early_stopping = train_params.get("early_stopping", {})
        self.random_seed = train_params.get("random_seed", None)
XFL | XFL-master/python/common/utils/tree_pickle_structure.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Optional
class NodePickle:
    """Plain, picklable snapshot of a single decision-tree node."""

    def __init__(self, id: str, depth: int, parent_node_id: Optional[str], left_node_id: Optional[str],
                 right_node_id: Optional[str], is_leaf: bool, weight: Optional[float], linkage: Optional[str],
                 split_point: Optional[float], feature_idx: Optional[int], missing_value_on_left: Optional[bool],
                 owner_id: Optional[str]):
        super().__init__()
        # Identity and topology.
        self.id = id
        self.depth = depth
        self.parent_node_id = parent_node_id
        self.left_node_id = left_node_id
        self.right_node_id = right_node_id
        # Leaf payload.
        self.is_leaf = is_leaf
        self.weight = weight
        self.linkage = linkage
        # Split definition (internal nodes only).
        self.split_point = split_point
        self.feature_idx = feature_idx
        self.missing_value_on_left = missing_value_on_left
        self.owner_id = owner_id
class TreePickle:
    """Plain, picklable snapshot of one party's decision tree."""

    def __init__(self, party_id: str, nodes: Dict[str, NodePickle], root_node: NodePickle, root_node_id: str):
        super().__init__()
        self.party_id = party_id
        self.nodes = nodes  # node id -> NodePickle
        self.root_node = root_node
        self.root_node_id = root_node_id
| 1,823 | 38.652174 | 113 | py |
XFL | XFL-master/python/common/utils/config_checker.py | import os
import importlib
import traceback
from collections import Counter
from common.checker.compare import compare
from common.utils.logger import logger
from common.utils.config_parser import replace_variable
def find_rule_class(fed_type, operator_name, role, inference):
    """Dynamically import the config-checking rule object for an operator.

    Args:
        fed_type: federation type prefix, e.g. "local" or "vertical".
        operator_name: operator name without the fed-type prefix.
        inference: when truthy, look up the "_infer" variant of the operator.
        role: party role module to import, e.g. "label_trainer".

    Returns:
        The rule object, or None when the module cannot be imported, the
        attribute is missing, or the fed type is unsupported (only "local"
        and "vertical" are known).
    """
    if inference:
        operator_name += '_infer'
    module_path = '.'.join(
        ['algorithm.config_descriptor', fed_type + '_' + operator_name, role])
    try:
        module = importlib.import_module(module_path)
    except Exception:  # typically ModuleNotFoundError
        logger.warning(traceback.format_exc())
        return None
    if fed_type == 'local':
        rule_name = fed_type + '_' + operator_name + '_rule'
    elif fed_type == 'vertical':
        rule_name = fed_type + '_' + operator_name + '_' + role + '_rule'
    else:
        return None
    # Missing attribute -> None, matching the original try/except behavior.
    return getattr(module, rule_name, None)
def check_stage_train_conf(conf):
    """Check a single stage's train config against its operator rule.

    Returns a report dict; on the success path it has the keys "result",
    "itemized_result", "summary" (passed, checked) and "message".
    """
    role = conf.get("identity")
    name = conf.get('model_info', {}).get('name')
    if not name:
        # NOTE(review): this early-return shape (lists) differs from the
        # dict/tuple shape returned below — confirm callers tolerate both.
        res = {
            "result": [],
            "itemized_result": [],
            "summary": [],
            "message": []
        }
        return res
    # Operator names look like "<fed_type>_<operator>", e.g. "vertical_xgboost".
    fed_type = name.split('_')[0]
    operator_name = '_'.join(name.split('_')[1:])
    inference = True if conf.get('inference') else False
    # Default report, returned when no rule can be located or compared.
    # NOTE(review): this shape has no "itemized_result" key.
    res = {
        "result": {},
        "summary": (0, 0),
        "message": 'Rule not found.'
    }
    if not role or not name:
        res["message"] = f"Role {role} or Name {name} not valid."
        return res
    rule = find_rule_class(fed_type, operator_name, role, inference)
    if not rule:
        return res
    try:
        result, itemized_result, rule_passed, rule_checked = compare(conf, rule)
    except Exception:
        logger.warning(traceback.format_exc())
        logger.info("Error when checking train_config.")
        return res
    res = {
        "result": result,
        "itemized_result": itemized_result,
        "summary": (rule_passed, rule_checked),
        "message": 'Config checked.'
    }
    return res
def check_multi_stage_train_conf(conf: list):
    """Validate every stage of a multi-stage train config.

    Args:
        conf: list of per-stage config dicts.

    Returns:
        A dict with parallel per-stage lists under "result",
        "itemized_result", "summary" and "message"; or the tuple
        ``([], [(0, 1)], "Not a list")`` when `conf` is not a list
        (shape kept for backward compatibility).
    """
    if not isinstance(conf, list):
        return [], [(0, 1)], "Not a list"
    res = {
        "result": [],
        "itemized_result": [],
        "summary": [],
        "message": []
    }
    for stage_conf in conf:
        if not isinstance(stage_conf, dict):
            # Bug fix: stage_itemized_result and stage_summary were previously
            # never assigned on this branch, raising NameError on the first
            # non-dict stage (or leaking the previous stage's values).
            stage_result = {"rule_passed": 0, "rule_checked": 1}
            stage_itemized_result = []
            stage_summary = (0, 1)
            stage_message = "Not a dict."
        else:
            report = check_stage_train_conf(stage_conf)
            # Use .get with defaults: the "rule not found" report from
            # check_stage_train_conf has no "itemized_result" key.
            stage_result = report.get("result", {})
            stage_itemized_result = report.get("itemized_result", [])
            stage_summary = report.get("summary", (0, 1))
            stage_message = report.get("message", "")
        res["result"].append(stage_result)
        res["itemized_result"].append(stage_itemized_result)
        res["summary"].append(stage_summary)
        res["message"].append(stage_message)
    return res
def check_cross_stage_input_output(conf: list, ignore_list: list = []):
    # NOTE(review): mutable default `ignore_list=[]` — only read here, never
    # mutated, so it is safe in practice.
    """Cross-check dataset paths across the stages of a multi-stage job.

    Collects every input and output path declared by each stage (after
    substituting the [STAGE_ID]/[JOB_ID]/[NODE_ID] placeholders) and reports:

    * "duplicated":  output paths produced more than once;
    * "blank":       empty input or output paths;
    * "nonexistent": input paths not produced by any earlier stage's output
                     and not whitelisted in ``ignore_list``.
    """
    input_dict = {}
    output_dict = {}
    """
    {
        0: [
            {
                "key_chain": ["input", "trainset"],
                "value": "/opt/dataset/a.csv"
            }
        ]
    }
    """
    # Phase 1: flatten each stage's "input"/"output" sections into lists of
    # {"key_chain": [...], "value": <joined path>} records.
    for stage_id, stage_conf in enumerate(conf):
        input = stage_conf.get("input", {})
        path = input.get("path", "")
        input_path = []
        for key in input:
            if isinstance(input[key], list):
                # e.g. "trainset": [{...}, {...}]
                for item in input[key]:
                    local_path = item.get("path", "") or path
                    local_name = item.get("name", "")
                    if isinstance(local_name, list):
                        # One item may list several file names.
                        for name in local_name:
                            input_path.append(
                                {
                                    "key_chain": ["input", key],
                                    "value": os.path.join(local_path, name)
                                }
                            )
                    else:
                        input_path.append(
                            {
                                "key_chain": ["input", key],
                                "value": os.path.join(local_path, local_name)
                            }
                        )
            elif isinstance(input[key], dict):
                # e.g. "pretrained_model": {...}
                item = input[key]
                local_path = item.get("path", "") or path
                local_name = item.get("name", "")
                if isinstance(local_name, list):
                    for name in local_name:
                        input_path.append(
                            {
                                "key_chain": ["input", key],
                                "value": os.path.join(local_path, name)
                            }
                        )
                else:
                    input_path.append(
                        {
                            "key_chain": ["input", key],
                            "value": os.path.join(local_path, local_name)
                        }
                    )
        input_dict[stage_id] = input_path
        output = stage_conf.get("output", {})
        path = output.get("path", "")
        output_path = []
        for key in output:
            if isinstance(output[key], dict):
                local_path = output[key].get("path") or path
                local_name = output[key].get("name", "")
                output_path.append(
                    {
                        "key_chain": ["output", key],
                        "value": os.path.join(local_path, local_name)
                    }
                )
        output_dict[stage_id] = output_path
    # Phase 2: resolve placeholders, using each record's own stage id.
    input_dict_a = {k: replace_variable(v, stage_id=k, job_id='JOB_ID', node_id='NODE_ID') for k, v in input_dict.items()}
    output_dict_a = {k: replace_variable(v, stage_id=k, job_id='JOB_ID', node_id='NODE_ID') for k, v in output_dict.items()}
    def find_duplicated_and_blank(in_dict, duplicated=True):
        # Scan a flattened {stage_id: [records]} dict for duplicated values
        # (only when `duplicated` is True) and for blank values.
        result = {
            "duplicated": [],
            "blank": [],
            "nonexistent": []
        }
        # Parallel lists: record i lives at stage_id_list[i]/key_chain_list[i].
        stage_id_list = []
        key_chain_list = []
        value_list = []
        for stage_id in in_dict:
            for path_dict in in_dict[stage_id]:
                stage_id_list.append(stage_id)
                key_chain_list.append(path_dict['key_chain'])
                value_list.append(path_dict['value'])
        if duplicated:
            count_result = dict(Counter(value_list))
            for k in count_result:
                # find duplicated
                if count_result[k] > 1:
                    index = [i for i, v in enumerate(value_list) if v == k]
                    if index:
                        result['duplicated'].append(
                            {
                                "value": k,
                                "position": [
                                    {
                                        "stage": stage_id_list[i],
                                        "key_chain": key_chain_list[i],
                                    } for i in index
                                ]
                            }
                        )
        # find blank
        index = [i for i, v in enumerate(value_list) if v.strip() == '']
        if index:
            result['blank'].append(
                {
                    "value": '',
                    "position": [
                        {
                            "stage": stage_id_list[i],
                            "key_chain": key_chain_list[i],
                        } for i in index
                    ]
                }
            )
        return result
    def find_nonexistent(input_dict, output_dict, ignore_list):
        # Flag input paths that no earlier stage produces as an output and
        # that are not explicitly whitelisted.
        result = {
            "duplicated": [],
            "blank": [],
            "nonexistent": []
        }
        stage_id_list = []
        key_chain_list = []
        value_list = []
        for stage_id in input_dict:
            for path_dict in input_dict[stage_id]:
                stage_id_list.append(stage_id)
                key_chain_list.append(path_dict['key_chain'])
                value_list.append(path_dict['value'])
        output_stage_id_list = []
        output_key_chain_list = []
        output_value_list = []
        for stage_id in output_dict:
            for path_dict in output_dict[stage_id]:
                output_stage_id_list.append(stage_id)
                output_key_chain_list.append(path_dict['key_chain'])
                output_value_list.append(path_dict['value'])
        for i, stage_id in enumerate(stage_id_list):
            # Only outputs of strictly earlier stages can satisfy an input.
            ids = [j for j, stage in enumerate(output_stage_id_list) if stage < stage_id]
            if value_list[i] not in [output_value_list[j] for j in ids] and value_list[i] not in ignore_list:
                result['nonexistent'].append(
                    {
                        "value": value_list[i],
                        "position": [
                            {
                                "stage": stage_id_list[i],
                                "key_chain": key_chain_list[i],
                            }
                        ]
                    }
                )
        return result
    # Phase 3: run the three checks and merge their findings.
    result = {
        "duplicated": [],
        "blank": [],
        "nonexistent": []
    }
    r1 = find_duplicated_and_blank(input_dict_a, duplicated=False)
    r2 = find_duplicated_and_blank(output_dict_a)
    r3 = find_nonexistent(input_dict_a, output_dict_a, ignore_list)
    result["duplicated"] += r1["duplicated"]
    result["duplicated"] += r2["duplicated"]
    result["blank"] += r1["blank"]
    result["blank"] += r2["blank"]
    result["nonexistent"] += r3["nonexistent"]
    return result
if __name__ == "__main__":
# path = '/mnt/c/Documents and Settings/wanghong/workspace/federated-learning/demo/vertical/xgboost/2party_env/config/trainer_config_node-1.json'
# import json
# conf = json.load(open(path, 'r'))
conf = \
[
{
"identity": "label_trainer",
"model_info": {
"name": "vertical_binning_woe_iv_fintech"
},
"input": {
"trainset": [
{
"type": "csv",
"path": "/opt/dataset/testing/fintech",
"name": "banking_guest_train_v01_20220216_TL.csv",
"has_id": True,
"has_label": True,
"nan_list": [
]
}
]
},
"output": {
"path": "/opt/checkpoints/[JOB_ID]/[NODE_ID]",
"model": {
"name": "vertical_binning_woe_iv_[STAGE_ID].json"
},
"iv": {
"name": "woe_iv_result_[STAGE_ID].json"
},
"split_points": {
"name": "binning_split_points_[STAGE_ID].json"
},
"trainset": {
"name": "fintech_woe_map_train_[STAGE_ID].csv"
}
},
"train_info": {
"interaction_params": {
"save_model": True
},
"train_params": {
"encryption": {
"paillier": {
"key_bit_size": 2048,
"precision": 7,
"djn_on": True,
"parallelize_on": True
}
},
"binning": {
"method": "equal_width",
"bins": 5
}
}
}
},
{
"identity": "label_trainer",
"model_info": {
"name": "vertical_feature_selection"
},
"input": {
"iv_result": {
"path": "/opt/checkpoints/[JOB_ID]/[NODE_ID]",
"name": "woe_iv_result_[STAGE_ID-1].json"
},
"trainset": [
{
"type": "csv",
"path": "/opt/dataset/testing/fintech",
"name": "banking_guest_train_v01_20220216_TL.csv",
"has_id": True,
"has_label": True
}
],
"valset": [
{
"type": "csv",
"path": "/opt/dataset/testing/fintech",
"name": "banking_guest_train_v01_20220216_TL.csv",
"has_id": True,
"has_label": True
}
]
},
"output": {
"path": "/opt/checkpoints/[JOB_ID]/[NODE_ID]",
"model": {
"name": "feature_selection_[STAGE_ID].pkl"
},
"trainset": {
"name": "selected_train_[STAGE_ID].csv"
},
"valset": {
"name": "selected_val_[STAGE_ID].csv"
}
},
"train_info": {
"train_params": {
"filter": {
"common": {
"metrics": "iv",
"filter_method": "threshold",
"threshold": 0.01
}
}
}
}
},
{
"identity": "label_trainer",
"model_info": {
"name": "vertical_pearson"
},
"input": {
"trainset": [
{
"type": "csv",
"path": "/opt/checkpoints/[JOB_ID]/[NODE_ID]",
"name": "selected_train_[STAGE_ID-1].csv",
"has_id": True,
"has_label": True
}
]
},
"output": {
"path": "/opt/checkpoints/[JOB_ID]/[NODE_ID]",
"corr": {
"name": "vertical_pearson_[STAGE_ID].pkl"
}
},
"train_info": {
"train_params": {
"col_index": -1,
"col_names": "",
"encryption": {
"paillier": {
"key_bit_size": 2048,
"precision": 6,
"djn_on": True,
"parallelize_on": True
}
},
"max_num_cores": 999,
"sample_size": 9999
}
}
},
{
"identity": "label_trainer",
"model_info": {
"name": "vertical_feature_selection"
},
"input": {
"corr_result": {
"path": "/opt/checkpoints/[JOB_ID]/[NODE_ID]",
"name": "vertical_pearson_[STAGE_ID-1].pkl"
},
"iv_result": {
"path": "/opt/checkpoints/[JOB_ID]/[NODE_ID]",
"name": "woe_iv_result_[STAGE_ID-3].json"
},
"trainset": [
{
"type": "csv",
"path": "/opt/checkpoints/[JOB_ID]/[NODE_ID]",
"name": "selected_train_[STAGE_ID-2].csv",
"has_id": True,
"has_label": True
}
],
"valset": [
{
"type": "csv",
"path": "/opt/checkpoints/[JOB_ID]/[NODE_ID]",
"name": "selected_val_[STAGE_ID-2].csv",
"has_id": True,
"has_label": True
}
]
},
"output": {
"path": "/opt/checkpoints/[JOB_ID]/[NODE_ID]",
"model": {
"name": "feature_selection_[STAGE_ID].pkl"
},
"trainset": {
"name": "selected_train_[STAGE_ID].csv"
},
"valset": {
"name": "selected_val_[STAGE_ID].csv"
}
},
"train_info": {
"train_params": {
"filter": {
"common": {
"metrics": "iv",
"filter_method": "threshold",
"threshold": 0.01
},
"correlation": {
"sort_metric": "iv",
"correlation_threshold": 0.7
}
}
}
}
},
{
"identity": "label_trainer",
"model_info": {
"name": "local_normalization"
},
"input": {
"trainset": [
{
"type": "csv",
"path": "/opt/checkpoints/[JOB_ID]/[NODE_ID]",
"name": "selected_train_[STAGE_ID-1].csv",
"has_id": True,
"has_label": True
}
],
"valset": [
{
"type": "csv",
"path": "/opt/checkpoints/[JOB_ID]/[NODE_ID]",
"name": "selected_val_[STAGE_ID-1].csv",
"has_id": True,
"has_label": True
}
]
},
"output": {
"path": "/opt/checkpoints/[JOB_ID]/[NODE_ID]",
"model": {
"name": "local_normalization_[STAGE_ID].pt"
},
"trainset": {
"name": "normalized_train_[STAGE_ID].csv"
},
"valset": {
"name": "normalized_val_[STAGE_ID].csv"
}
},
"train_info": {
"train_params": {
"norm": "max",
"axis": 0
}
}
},
{
"identity": "label_trainer",
"model_info": {
"name": "vertical_logistic_regression"
},
"input": {
"trainset": [
{
"type": "csv",
"path": "/opt/checkpoints/[JOB_ID]/[NODE_ID]",
"name": "normalized_train_[STAGE_ID-1].csv",
"has_id": True,
"has_label": True
}
],
"valset": [
{
"type": "csv",
"path": "/opt/checkpoints/[JOB_ID]/[NODE_ID]",
"name": "normalized_val_[STAGE_ID-1].csv",
"has_id": True,
"has_label": True
}
],
"pretrained_model": {
"path": "",
"name": ""
}
},
"output": {
"path": "/opt/checkpoints/[JOB_ID]/[NODE_ID]",
"model": {
"name": "vertical_logitstic_regression_[STAGE_ID].pt"
},
"metric_train": {
"name": "lr_metric_train_[STAGE_ID].csv"
},
"metric_val": {
"name": "lr_metric_val_[STAGE_ID].csv"
},
"prediction_train": {
"name": "lr_prediction_train_[STAGE_ID].csv"
},
"prediction_val": {
"name": "lr_prediction_val_[STAGE_ID].csv"
},
"ks_plot_train": {
"name": "lr_ks_plot_train_[STAGE_ID].csv"
},
"ks_plot_val": {
"name": "lr_ks_plot_val_[STAGE_ID].csv"
},
"decision_table_train": {
"name": "lr_decision_table_train_[STAGE_ID].csv"
},
"decision_table_val": {
"name": "lr_decision_table_val_[STAGE_ID].csv"
},
"feature_importance": {
"name": "lr_feature_importance_[STAGE_ID].csv"
}
},
"train_info": {
"interaction_params": {
"save_frequency": -1,
"write_training_prediction": True,
"write_validation_prediction": True,
"echo_training_metrics": True
},
"train_params": {
"global_epoch": 2,
"batch_size": 512,
"encryption": {
"ckks": {
"poly_modulus_degree": 8192,
"coeff_mod_bit_sizes": [
60,
40,
40,
60
],
"global_scale_bit_size": 40
}
},
"optimizer": {
"lr": 0.01,
"p": 2,
"alpha": 1e-4
},
"metric": {
"decision_table": {
"method": "equal_frequency",
"bins": 10
},
"acc": {},
"precision": {},
"recall": {},
"f1_score": {},
"auc": {},
"ks": {}
},
"early_stopping": {
"key": "acc",
"patience": 10,
"delta": 0
},
"random_seed": 50
}
}
}
]
result = check_multi_stage_train_conf(conf)
print(result)
result = check_cross_stage_input_output(conf)
print(result)
conf = [
{
"identity": "label_trainer",
"model_info": {
"name": "vertical_xgboost"
},
"inference": True
}
]
result = check_multi_stage_train_conf(conf)
print(result)
| 25,081 | 35.037356 | 149 | py |
XFL | XFL-master/python/common/utils/config_sync.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from typing import Dict
from common.checker.matcher import get_matched_config
from common.communication.gRPC.python.channel import DualChannel
from common.utils.utils import update_dict
from service.fed_node import FedNode
from service.fed_config import FedConfig
class ConfigSynchronizer:
    """Synchronizes a (partial) train config across all federation parties.

    The first party in (assist_trainer + label_trainers + trainers) acts as
    the coordinator; every other party opens one dual channel to it.
    """
    def __init__(self, config: dict):
        self.config = copy.deepcopy(config)
        assist_trainer = FedConfig.get_assist_trainer()
        label_trainers = FedConfig.get_label_trainer()
        trainers = FedConfig.get_trainer()
        assist_trainer = [assist_trainer] if assist_trainer else []
        all_trainers = assist_trainer + label_trainers + trainers
        # The first party of the combined list coordinates the sync.
        self.coordinator = all_trainers[0]
        self.is_coordinator = FedNode.node_id == self.coordinator
        if self.is_coordinator:
            # Coordinator: one dual channel per non-coordinator party.
            self.sync_chann: Dict[str, DualChannel] = {}
            for party_id in [id for id in all_trainers if id != self.coordinator]:
                self.sync_chann[party_id] = DualChannel(
                    name="sync_" + party_id, ids=[self.coordinator, party_id])
        else:
            # Non-coordinator: a single channel to the coordinator.
            self.sync_chann: DualChannel = None
            self.sync_chann = DualChannel(
                name="sync_" + FedNode.node_id, ids=[self.coordinator, FedNode.node_id]
            )
    def sync(self, sync_rule: dict):
        ''' for example:
        sync_rule = {
            "train_info": All()
        }
        '''
        # Recursively count dict keys; used as a proxy for how "complete"
        # a config fragment is.
        def count_key(conf):
            if isinstance(conf, dict):
                num = len(conf.keys())
                for k, v in conf.items():
                    num += count_key(v)
                return num
            else:
                return 0
        if self.is_coordinator:
            # Collect every party's matched fragment, keep the one with the
            # most keys (>= so a later equal-sized fragment wins), then
            # broadcast the winner back to every party.
            conf_to_update = get_matched_config(self.config, sync_rule)
            max_key_num = count_key(conf_to_update)
            for party_id in self.sync_chann:
                conf = self.sync_chann[party_id].recv()
                num = count_key(conf)
                if num >= max_key_num:
                    conf_to_update = conf
                    max_key_num = num
            for party_id in self.sync_chann:
                self.sync_chann[party_id].send(conf_to_update)
        else:
            # Send our fragment and adopt whatever the coordinator picks.
            config_to_sync = get_matched_config(self.config, sync_rule)
            self.sync_chann.send(config_to_sync)
            conf_to_update = self.sync_chann.recv()
        update_dict(self.config, conf_to_update)
        return self.config
| 3,155 | 36.571429 | 87 | py |
XFL | XFL-master/python/common/utils/__init__.py | 0 | 0 | 0 | py | |
XFL | XFL-master/python/common/utils/model_preserver.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import OrderedDict
import torch
from common.utils.logger import logger
# TODO: Gradually replace this class; it will be removed in the future.
class ModelPreserver(object):
    """Utility for saving and loading torch model checkpoints."""

    @staticmethod
    def save(save_dir: str,
             model_name: str,
             state_dict: OrderedDict,
             epoch: int = None,
             final: bool = False,
             suggest_threshold: float = None
             ):
        """Save a checkpoint ``{"state_dict": ...}`` under `save_dir`.

        Args:
            save_dir: target directory (created if missing).
            model_name: base file name, e.g. "model.pt".
            state_dict: model weights to persist.
            epoch: when given (and not `final`), "_epoch_<n>" is appended to
                the file name stem.
            final: when True, save under the plain `model_name`.
            suggest_threshold: optional decision threshold stored alongside
                the weights.
        """
        # exist_ok avoids the check-then-create race of the previous
        # `if not os.path.exists(...)` guard.
        os.makedirs(save_dir, exist_ok=True)
        model_info = {"state_dict": state_dict}
        if suggest_threshold:
            model_info["suggest_threshold"] = suggest_threshold
        model_name_list = model_name.split(".")
        name_prefix, name_postfix = ".".join(model_name_list[:-1]), model_name_list[-1]
        # Bug fix: compare against None so epoch 0 still gets its suffix
        # (previously `and epoch` treated epoch 0 as "no epoch").
        if not final and epoch is not None:
            model_name = name_prefix + "_epoch_{}".format(epoch) + "." + name_postfix
        else:
            model_name = name_prefix + "." + name_postfix
        model_path = os.path.join(save_dir, model_name)
        torch.save(model_info, model_path)
        logger.info("model saved as: {}.".format(model_path))
        return

    @staticmethod
    def load(model_path: str):
        """Load a checkpoint previously written by :meth:`save`."""
        return torch.load(model_path)
| 1,794 | 31.053571 | 87 | py |
XFL | XFL-master/python/common/utils/auto_descriptor/torch/lr_scheduler.py | from common.checker.x_types import String, Bool, Integer, Float, Any, All
from common.checker.qualifiers import OneOf, SomeOf, RepeatableSomeOf, Required, Optional
lr_scheduler = {
"ConstantLR": {
"factor": Float(0.3333333333333333),
"total_iters": Integer(5),
"last_epoch": Integer(-1),
"verbose": Bool(False),
"__rule__": [Optional("last_epoch"), Optional("total_iters"), Optional("factor"), Optional("verbose")]
},
"CosineAnnealingLR": {
"T_max": All("No default value"),
"eta_min": Integer(0),
"last_epoch": Integer(-1),
"verbose": Bool(False),
"__rule__": [Required("T_max"), Optional("eta_min"), Optional("last_epoch"), Optional("verbose")]
},
"CosineAnnealingWarmRestarts": {
"T_0": All("No default value"),
"T_mult": Integer(1),
"eta_min": Integer(0),
"last_epoch": Integer(-1),
"verbose": Bool(False),
"__rule__": [Required("T_0"), Optional("eta_min"), Optional("last_epoch"), Optional("verbose"), Optional("T_mult")]
},
"CyclicLR": {
"base_lr": All("No default value"),
"max_lr": All("No default value"),
"step_size_up": Integer(2000),
"step_size_down": All(None),
"mode": String("triangular"),
"gamma": Float(1.0),
"scale_fn": All(None),
"scale_mode": String("cycle"),
"cycle_momentum": Bool(True),
"base_momentum": Float(0.8),
"max_momentum": Float(0.9),
"last_epoch": Integer(-1),
"verbose": Bool(False),
"__rule__": [Required("base_lr", "max_lr"), Optional("mode"), Optional("base_momentum"), Optional("last_epoch"), Optional("gamma"), Optional("verbose"), Optional("scale_fn"), Optional("max_momentum"), Optional("step_size_down"), Optional("step_size_up"), Optional("cycle_momentum"), Optional("scale_mode")]
},
"ExponentialLR": {
"gamma": All("No default value"),
"last_epoch": Integer(-1),
"verbose": Bool(False),
"__rule__": [Required("gamma"), Optional("last_epoch"), Optional("verbose")]
},
"LambdaLR": {
"lr_lambda": All("No default value"),
"last_epoch": Integer(-1),
"verbose": Bool(False),
"__rule__": [Required("lr_lambda"), Optional("last_epoch"), Optional("verbose")]
},
"LinearLR": {
"start_factor": Float(0.3333333333333333),
"end_factor": Float(1.0),
"total_iters": Integer(5),
"last_epoch": Integer(-1),
"verbose": Bool(False),
"__rule__": [Optional("total_iters"), Optional("end_factor"), Optional("last_epoch"), Optional("start_factor"), Optional("verbose")]
},
"MultiStepLR": {
"milestones": All("No default value"),
"gamma": Float(0.1),
"last_epoch": Integer(-1),
"verbose": Bool(False),
"__rule__": [Required("milestones"), Optional("last_epoch"), Optional("gamma"), Optional("verbose")]
},
"MultiplicativeLR": {
"lr_lambda": All("No default value"),
"last_epoch": Integer(-1),
"verbose": Bool(False),
"__rule__": [Required("lr_lambda"), Optional("last_epoch"), Optional("verbose")]
},
"OneCycleLR": {
"max_lr": All("No default value"),
"total_steps": All(None),
"epochs": All(None),
"steps_per_epoch": All(None),
"pct_start": Float(0.3),
"anneal_strategy": String("cos"),
"cycle_momentum": Bool(True),
"base_momentum": Float(0.85),
"max_momentum": Float(0.95),
"div_factor": Float(25.0),
"final_div_factor": Float(10000.0),
"three_phase": Bool(False),
"last_epoch": Integer(-1),
"verbose": Bool(False),
"__rule__": [Required("max_lr"), Optional("div_factor"), Optional("final_div_factor"), Optional("base_momentum"), Optional("last_epoch"), Optional("verbose"), Optional("pct_start"), Optional("cycle_momentum"), Optional("epochs"), Optional("max_momentum"), Optional("steps_per_epoch"), Optional("total_steps"), Optional("three_phase"), Optional("anneal_strategy")]
},
"ReduceLROnPlateau": {
"mode": String("min"),
"factor": Float(0.1),
"patience": Integer(10),
"threshold": Float(0.0001),
"threshold_mode": String("rel"),
"cooldown": Integer(0),
"min_lr": Integer(0),
"eps": Float(1e-08),
"verbose": Bool(False),
"__rule__": [Optional("mode"), Optional("threshold_mode"), Optional("threshold"), Optional("patience"), Optional("verbose"), Optional("eps"), Optional("cooldown"), Optional("min_lr"), Optional("factor")]
},
"SequentialLR": {
"schedulers": All("No default value"),
"milestones": All("No default value"),
"last_epoch": Integer(-1),
"verbose": Bool(False),
"__rule__": [Required("schedulers", "milestones"), Optional("last_epoch"), Optional("verbose")]
},
"StepLR": {
"step_size": All("No default value"),
"gamma": Float(0.1),
"last_epoch": Integer(-1),
"verbose": Bool(False),
"__rule__": [Required("step_size"), Optional("last_epoch"), Optional("gamma"), Optional("verbose")]
}
}
| 5,242 | 42.330579 | 371 | py |
XFL | XFL-master/python/common/utils/auto_descriptor/torch/lossfunc.py | from common.checker.x_types import String, Bool, Integer, Float, Any, All
from common.checker.qualifiers import OneOf, SomeOf, RepeatableSomeOf, Required, Optional
# Auto-generated descriptor of the torch.nn loss-function constructors
# (written by gen_torch_lossfunc_dict in torch_descriptor.py — regenerate
# there rather than editing by hand).  Each entry maps a loss class name to
# its constructor keyword arguments wrapped in checker types, plus a
# "__rule__" list marking each argument Required or Optional.
lossfunc = {
    "AdaptiveLogSoftmaxWithLoss": {
        "in_features": All("No default value"),
        "n_classes": All("No default value"),
        "cutoffs": All("No default value"),
        "div_value": Float(4.0),
        "head_bias": Bool(False),
        "device": All(None),
        "dtype": All(None),
        "__rule__": [Required("in_features", "n_classes", "cutoffs"), Optional("head_bias"), Optional("device"), Optional("div_value"), Optional("dtype")]
    },
    "BCELoss": {
        "weight": All(None),
        "size_average": All(None),
        "reduce": All(None),
        "reduction": String("mean"),
        "__rule__": [Optional("weight"), Optional("size_average"), Optional("reduce"), Optional("reduction")]
    },
    "BCEWithLogitsLoss": {
        "weight": All(None),
        "size_average": All(None),
        "reduce": All(None),
        "reduction": String("mean"),
        "pos_weight": All(None),
        "__rule__": [Optional("weight"), Optional("reduce"), Optional("size_average"), Optional("pos_weight"), Optional("reduction")]
    },
    "CTCLoss": {
        "blank": Integer(0),
        "reduction": String("mean"),
        "zero_infinity": Bool(False),
        "__rule__": [Optional("blank"), Optional("zero_infinity"), Optional("reduction")]
    },
    "CosineEmbeddingLoss": {
        "margin": Float(0.0),
        "size_average": All(None),
        "reduce": All(None),
        "reduction": String("mean"),
        "__rule__": [Optional("margin"), Optional("size_average"), Optional("reduce"), Optional("reduction")]
    },
    "CrossEntropyLoss": {
        "weight": All(None),
        "size_average": All(None),
        "ignore_index": Integer(-100),
        "reduce": All(None),
        "reduction": String("mean"),
        "label_smoothing": Float(0.0),
        "__rule__": [Optional("weight"), Optional("reduce"), Optional("size_average"), Optional("ignore_index"), Optional("label_smoothing"), Optional("reduction")]
    },
    "GaussianNLLLoss": {
        "full": Bool(False),
        "eps": Float(1e-06),
        "reduction": String("mean"),
        "__rule__": [Optional("eps"), Optional("full"), Optional("reduction")]
    },
    "HingeEmbeddingLoss": {
        "margin": Float(1.0),
        "size_average": All(None),
        "reduce": All(None),
        "reduction": String("mean"),
        "__rule__": [Optional("margin"), Optional("size_average"), Optional("reduce"), Optional("reduction")]
    },
    "HuberLoss": {
        "reduction": String("mean"),
        "delta": Float(1.0),
        "__rule__": [Optional("delta"), Optional("reduction")]
    },
    "KLDivLoss": {
        "size_average": All(None),
        "reduce": All(None),
        "reduction": String("mean"),
        "log_target": Bool(False),
        "__rule__": [Optional("log_target"), Optional("size_average"), Optional("reduce"), Optional("reduction")]
    },
    "L1Loss": {
        "size_average": All(None),
        "reduce": All(None),
        "reduction": String("mean"),
        "__rule__": [Optional("size_average"), Optional("reduce"), Optional("reduction")]
    },
    "MSELoss": {
        "size_average": All(None),
        "reduce": All(None),
        "reduction": String("mean"),
        "__rule__": [Optional("size_average"), Optional("reduce"), Optional("reduction")]
    },
    "MarginRankingLoss": {
        "margin": Float(0.0),
        "size_average": All(None),
        "reduce": All(None),
        "reduction": String("mean"),
        "__rule__": [Optional("margin"), Optional("size_average"), Optional("reduce"), Optional("reduction")]
    },
    "MultiLabelMarginLoss": {
        "size_average": All(None),
        "reduce": All(None),
        "reduction": String("mean"),
        "__rule__": [Optional("size_average"), Optional("reduce"), Optional("reduction")]
    },
    "MultiLabelSoftMarginLoss": {
        "weight": All(None),
        "size_average": All(None),
        "reduce": All(None),
        "reduction": String("mean"),
        "__rule__": [Optional("weight"), Optional("size_average"), Optional("reduce"), Optional("reduction")]
    },
    "MultiMarginLoss": {
        "p": Integer(1),
        "margin": Float(1.0),
        "weight": All(None),
        "size_average": All(None),
        "reduce": All(None),
        "reduction": String("mean"),
        "__rule__": [Optional("weight"), Optional("reduce"), Optional("p"), Optional("size_average"), Optional("margin"), Optional("reduction")]
    },
    "NLLLoss": {
        "weight": All(None),
        "size_average": All(None),
        "ignore_index": Integer(-100),
        "reduce": All(None),
        "reduction": String("mean"),
        "__rule__": [Optional("weight"), Optional("reduce"), Optional("size_average"), Optional("ignore_index"), Optional("reduction")]
    },
    "NLLLoss2d": {
        "weight": All(None),
        "size_average": All(None),
        "ignore_index": Integer(-100),
        "reduce": All(None),
        "reduction": String("mean"),
        "__rule__": [Optional("weight"), Optional("reduce"), Optional("size_average"), Optional("ignore_index"), Optional("reduction")]
    },
    "PoissonNLLLoss": {
        "log_input": Bool(True),
        "full": Bool(False),
        "size_average": All(None),
        "eps": Float(1e-08),
        "reduce": All(None),
        "reduction": String("mean"),
        "__rule__": [Optional("reduction"), Optional("reduce"), Optional("size_average"), Optional("full"), Optional("log_input"), Optional("eps")]
    },
    "SmoothL1Loss": {
        "size_average": All(None),
        "reduce": All(None),
        "reduction": String("mean"),
        "beta": Float(1.0),
        "__rule__": [Optional("beta"), Optional("size_average"), Optional("reduce"), Optional("reduction")]
    },
    "SoftMarginLoss": {
        "size_average": All(None),
        "reduce": All(None),
        "reduction": String("mean"),
        "__rule__": [Optional("size_average"), Optional("reduce"), Optional("reduction")]
    },
    "TripletMarginLoss": {
        "margin": Float(1.0),
        "p": Float(2.0),
        "eps": Float(1e-06),
        "swap": Bool(False),
        "size_average": All(None),
        "reduce": All(None),
        "reduction": String("mean"),
        "__rule__": [Optional("reduction"), Optional("reduce"), Optional("size_average"), Optional("p"), Optional("swap"), Optional("margin"), Optional("eps")]
    },
    "TripletMarginWithDistanceLoss": {
        "distance_function": All(None),
        "margin": Float(1.0),
        "swap": Bool(False),
        "reduction": String("mean"),
        "__rule__": [Optional("reduction"), Optional("distance_function"), Optional("margin"), Optional("swap")]
    }
}
| 6,846 | 38.125714 | 164 | py |
XFL | XFL-master/python/common/utils/auto_descriptor/torch/metrics.py | from common.checker.x_types import String, Bool, Integer, Float, Any, All
from common.checker.qualifiers import OneOf, SomeOf, RepeatableSomeOf, Required, Optional
# Auto-generated descriptor of the supported evaluation metrics
# (written by gen_metric_dict in torch_descriptor.py — regenerate there
# rather than editing by hand).  Keys are either short aliases from
# algorithm.core.metrics.metric_dict (e.g. "acc", "mse", "auc") or the
# sklearn.metrics function name; values describe the keyword arguments
# after the two mandatory data arguments, plus a "__rule__" list.
metrics = {
    "acc": {
        "normalize": Bool(True),
        "sample_weight": All(None),
        "__rule__": [Optional("sample_weight"), Optional("normalize")]
    },
    "adjusted_mutual_info_score": {
        "average_method": String("arithmetic"),
        "__rule__": [Optional("average_method")]
    },
    "adjusted_rand_score": {
    },
    # NOTE(review): duplicate "auc" key — this empty entry is silently
    # overwritten by the later "auc" entry below (Python keeps the last
    # duplicate key in a dict literal), so it is effectively dead.
    "auc": {
    },
    "average_precision_score": {
        "average": String("macro"),
        "pos_label": Integer(1),
        "sample_weight": All(None),
        "__rule__": [Optional("sample_weight"), Optional("pos_label"), Optional("average")]
    },
    "balanced_accuracy_score": {
        "sample_weight": All(None),
        "adjusted": Bool(False),
        "__rule__": [Optional("sample_weight"), Optional("adjusted")]
    },
    "brier_score_loss": {
        "sample_weight": All(None),
        "pos_label": All(None),
        "__rule__": [Optional("sample_weight"), Optional("pos_label")]
    },
    "calinski_harabasz_score": {
    },
    "classification_report": {
        "labels": All(None),
        "target_names": All(None),
        "sample_weight": All(None),
        "digits": Integer(2),
        "output_dict": Bool(False),
        "zero_division": String("warn"),
        "__rule__": [Optional("target_names"), Optional("digits"), Optional("labels"), Optional("sample_weight"), Optional("output_dict"), Optional("zero_division")]
    },
    "completeness_score": {
    },
    "confusion_matrix": {
        "labels": All(None),
        "sample_weight": All(None),
        "normalize": All(None),
        "__rule__": [Optional("sample_weight"), Optional("normalize"), Optional("labels")]
    },
    "consensus_score": {
        "similarity": String("jaccard"),
        "__rule__": [Optional("similarity")]
    },
    "coverage_error": {
        "sample_weight": All(None),
        "__rule__": [Optional("sample_weight")]
    },
    "d2_tweedie_score": {
        "sample_weight": All(None),
        "power": Integer(0),
        "__rule__": [Optional("sample_weight"), Optional("power")]
    },
    "davies_bouldin_score": {
    },
    "dcg_score": {
        "k": All(None),
        "log_base": Integer(2),
        "sample_weight": All(None),
        "ignore_ties": Bool(False),
        "__rule__": [Optional("sample_weight"), Optional("ignore_ties"), Optional("k"), Optional("log_base")]
    },
    "det_curve": {
        "pos_label": All(None),
        "sample_weight": All(None),
        "__rule__": [Optional("sample_weight"), Optional("pos_label")]
    },
    "euclidean_distances": {
        "Y_norm_squared": All(None),
        "squared": Bool(False),
        "X_norm_squared": All(None),
        "__rule__": [Optional("squared"), Optional("X_norm_squared"), Optional("Y_norm_squared")]
    },
    "explained_variance_score": {
        "sample_weight": All(None),
        "multioutput": String("uniform_average"),
        "__rule__": [Optional("sample_weight"), Optional("multioutput")]
    },
    "f1_score": {
        "labels": All(None),
        "pos_label": Integer(1),
        "average": String("binary"),
        "sample_weight": All(None),
        "zero_division": String("warn"),
        "__rule__": [Optional("labels"), Optional("sample_weight"), Optional("zero_division"), Optional("pos_label"), Optional("average")]
    },
    "fbeta_score": {
        "beta": All("No default value"),
        "labels": All(None),
        "pos_label": Integer(1),
        "average": String("binary"),
        "sample_weight": All(None),
        "zero_division": String("warn"),
        "__rule__": [Required("beta"), Optional("sample_weight"), Optional("pos_label"), Optional("labels"), Optional("zero_division"), Optional("average")]
    },
    "fowlkes_mallows_score": {
        "sparse": Bool(False),
        "__rule__": [Optional("sparse")]
    },
    "hamming_loss": {
        "sample_weight": All(None),
        "__rule__": [Optional("sample_weight")]
    },
    "homogeneity_completeness_v_measure": {
        "beta": Float(1.0),
        "__rule__": [Optional("beta")]
    },
    "homogeneity_score": {
    },
    "jaccard_score": {
        "labels": All(None),
        "pos_label": Integer(1),
        "average": String("binary"),
        "sample_weight": All(None),
        "zero_division": String("warn"),
        "__rule__": [Optional("labels"), Optional("sample_weight"), Optional("zero_division"), Optional("pos_label"), Optional("average")]
    },
    "label_ranking_average_precision_score": {
        "sample_weight": All(None),
        "__rule__": [Optional("sample_weight")]
    },
    "label_ranking_loss": {
        "sample_weight": All(None),
        "__rule__": [Optional("sample_weight")]
    },
    "log_loss": {
        "eps": Float(1e-15),
        "normalize": Bool(True),
        "sample_weight": All(None),
        "labels": All(None),
        "__rule__": [Optional("sample_weight"), Optional("normalize"), Optional("labels"), Optional("eps")]
    },
    "matthews_corrcoef": {
        "sample_weight": All(None),
        "__rule__": [Optional("sample_weight")]
    },
    "max_error": {
    },
    "mae": {
        "sample_weight": All(None),
        "multioutput": String("uniform_average"),
        "__rule__": [Optional("sample_weight"), Optional("multioutput")]
    },
    "mape": {
        "sample_weight": All(None),
        "multioutput": String("uniform_average"),
        "__rule__": [Optional("sample_weight"), Optional("multioutput")]
    },
    "mean_gamma_deviance": {
        "sample_weight": All(None),
        "__rule__": [Optional("sample_weight")]
    },
    "mean_pinball_loss": {
        "sample_weight": All(None),
        "alpha": Float(0.5),
        "multioutput": String("uniform_average"),
        "__rule__": [Optional("sample_weight"), Optional("alpha"), Optional("multioutput")]
    },
    "mean_poisson_deviance": {
        "sample_weight": All(None),
        "__rule__": [Optional("sample_weight")]
    },
    "mse": {
        "sample_weight": All(None),
        "multioutput": String("uniform_average"),
        "squared": Bool(True),
        "__rule__": [Optional("sample_weight"), Optional("multioutput"), Optional("squared")]
    },
    "mean_squared_log_error": {
        "sample_weight": All(None),
        "multioutput": String("uniform_average"),
        "squared": Bool(True),
        "__rule__": [Optional("sample_weight"), Optional("multioutput"), Optional("squared")]
    },
    "mean_tweedie_deviance": {
        "sample_weight": All(None),
        "power": Integer(0),
        "__rule__": [Optional("sample_weight"), Optional("power")]
    },
    "median_ae": {
        "multioutput": String("uniform_average"),
        "sample_weight": All(None),
        "__rule__": [Optional("sample_weight"), Optional("multioutput")]
    },
    "multilabel_confusion_matrix": {
        "sample_weight": All(None),
        "labels": All(None),
        "samplewise": Bool(False),
        "__rule__": [Optional("sample_weight"), Optional("samplewise"), Optional("labels")]
    },
    "mutual_info_score": {
        "contingency": All(None),
        "__rule__": [Optional("contingency")]
    },
    "nan_euclidean_distances": {
        "squared": Bool(False),
        "missing_values": Float(None),
        "copy": Bool(True),
        "__rule__": [Optional("squared"), Optional("missing_values"), Optional("copy")]
    },
    "ndcg_score": {
        "k": All(None),
        "sample_weight": All(None),
        "ignore_ties": Bool(False),
        "__rule__": [Optional("sample_weight"), Optional("ignore_ties"), Optional("k")]
    },
    "normalized_mutual_info_score": {
        "average_method": String("arithmetic"),
        "__rule__": [Optional("average_method")]
    },
    "pair_confusion_matrix": {
    },
    "pairwise_distances": {
        "metric": String("euclidean"),
        "n_jobs": All(None),
        "force_all_finite": Bool(True),
        "__rule__": [Optional("force_all_finite"), Optional("n_jobs"), Optional("metric")]
    },
    "pairwise_distances_argmin": {
        "axis": Integer(1),
        "metric": String("euclidean"),
        "metric_kwargs": All(None),
        "__rule__": [Optional("axis"), Optional("metric_kwargs"), Optional("metric")]
    },
    "pairwise_distances_argmin_min": {
        "axis": Integer(1),
        "metric": String("euclidean"),
        "metric_kwargs": All(None),
        "__rule__": [Optional("axis"), Optional("metric_kwargs"), Optional("metric")]
    },
    "pairwise_distances_chunked": {
        "reduce_func": All(None),
        "metric": String("euclidean"),
        "n_jobs": All(None),
        "working_memory": All(None),
        "__rule__": [Optional("working_memory"), Optional("reduce_func"), Optional("n_jobs"), Optional("metric")]
    },
    "pairwise_kernels": {
        "metric": String("linear"),
        "filter_params": Bool(False),
        "n_jobs": All(None),
        "__rule__": [Optional("filter_params"), Optional("n_jobs"), Optional("metric")]
    },
    "precision_recall_fscore_support": {
        "beta": Float(1.0),
        "labels": All(None),
        "pos_label": Integer(1),
        "average": All(None),
        "warn_for": [
            String("precision"),
            String("recall"),
            String("f-score"),
            ],
        "sample_weight": All(None),
        "zero_division": String("warn"),
        "__rule__": [Optional("warn_for"), Optional("beta"), Optional("labels"), Optional("sample_weight"), Optional("zero_division"), Optional("pos_label"), Optional("average")]
    },
    "precision": {
        "labels": All(None),
        "pos_label": Integer(1),
        "average": String("binary"),
        "sample_weight": All(None),
        "zero_division": String("warn"),
        "__rule__": [Optional("labels"), Optional("sample_weight"), Optional("zero_division"), Optional("pos_label"), Optional("average")]
    },
    "r2": {
        "sample_weight": All(None),
        "multioutput": String("uniform_average"),
        "__rule__": [Optional("sample_weight"), Optional("multioutput")]
    },
    "rand_score": {
    },
    "recall": {
        "labels": All(None),
        "pos_label": Integer(1),
        "average": String("binary"),
        "sample_weight": All(None),
        "zero_division": String("warn"),
        "__rule__": [Optional("labels"), Optional("sample_weight"), Optional("zero_division"), Optional("pos_label"), Optional("average")]
    },
    # This entry (from sklearn's roc_auc_score, aliased to "auc") is the one
    # actually kept for the "auc" key.
    "auc": {
        "average": String("macro"),
        "sample_weight": All(None),
        "max_fpr": All(None),
        "multi_class": String("raise"),
        "labels": All(None),
        "__rule__": [Optional("max_fpr"), Optional("labels"), Optional("sample_weight"), Optional("multi_class"), Optional("average")]
    },
    "roc_curve": {
        "pos_label": All(None),
        "sample_weight": All(None),
        "drop_intermediate": Bool(True),
        "__rule__": [Optional("sample_weight"), Optional("drop_intermediate"), Optional("pos_label")]
    },
    "silhouette_samples": {
        "metric": String("euclidean"),
        "__rule__": [Optional("metric")]
    },
    "silhouette_score": {
        "metric": String("euclidean"),
        "sample_size": All(None),
        "random_state": All(None),
        "__rule__": [Optional("sample_size"), Optional("random_state"), Optional("metric")]
    },
    "top_k_accuracy_score": {
        "k": Integer(2),
        "normalize": Bool(True),
        "sample_weight": All(None),
        "labels": All(None),
        "__rule__": [Optional("sample_weight"), Optional("normalize"), Optional("k"), Optional("labels")]
    },
    "v_measure_score": {
        "beta": Float(1.0),
        "__rule__": [Optional("beta")]
    },
    "zero_one_loss": {
        "normalize": Bool(True),
        "sample_weight": All(None),
        "__rule__": [Optional("sample_weight"), Optional("normalize")]
    },
    "ks": {
    },
    "rmse": {
    }
}
| 12,060 | 34.578171 | 178 | py |
XFL | XFL-master/python/common/utils/auto_descriptor/torch/torch_descriptor.py |
import inspect
import math
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
import torch.nn as nn
import sklearn.metrics as sklearn_metrics
import algorithm.core.metrics as custom_metrics
from algorithm.core.metrics import metric_dict
# from common.checker.qualifiers import (OneOf, Optional, RepeatableSomeOf,
# Required, SomeOf)
# from common.checker.x_types import All, Any, Bool, Float, Integer, String
def gen_torch_optim_dict(out_path: str):
    """Write ``optimizer.py``: a checker-descriptor dict for every class in
    ``torch.optim`` (the abstract ``Optimizer`` base is excluded), built by
    introspecting each constructor signature.  Arguments without a default
    become ``Required`` entries in ``__rule__``; everything else ``Optional``.
    The ``params`` argument is omitted because it cannot be configured from
    a config file.
    """
    methods = [getattr(optim, name) for name in dir(optim) if isinstance(getattr(optim, name), type) and name not in ['Optimizer']]
    # `blank` tracks the current indentation of the generated file; it grows
    # by 4 spaces per nesting level and shrinks via blank[:-4].
    blank = ''
    with open(out_path, 'w') as f:
        f.write('from common.checker.x_types import String, Bool, Integer, Float, Any, All\n')
        f.write('from common.checker.qualifiers import OneOf, SomeOf, RepeatableSomeOf, Required, Optional\n')
        f.write('\n\n')
        f.write('optimizer = {')
        blank += '    '
        for i, method in enumerate(methods):
            # print(method.__name__)
            # print(inspect.getfullargspec(method))
            # print(inspect.signature(method).parameters)
            mark0 = ',' if i > 0 else ''
            f.write(f'{mark0}\n' + blank + f'"{method.__name__}": ' + '{')
            blank += '    '
            params = list(inspect.signature(method).parameters.values())
            required_params = []
            whole_params = []
            for j, param in enumerate(params):
                name = param.name
                default = param.default
                # j == 0 is the skipped `params` argument, so the first entry
                # actually emitted (j == 1) correctly gets no leading comma.
                mark1 = ',' if j > 1 else ''
                # Don't support params
                if name == 'params':
                    continue
                # No default lr value for SGD
                # if name == 'lr' and not isinstance(name, (int, float)):
                #     default = 0.001
                # Emit the default wrapped in the matching checker type; the
                # bool test must come before int because bool is an int subclass.
                if isinstance(default, bool):
                    f.write(f'{mark1}\n' + blank + f'"{name}": Bool({default})')
                elif isinstance(default, int):
                    f.write(f'{mark1}\n' + blank + f'"{name}": Integer({default})')
                elif isinstance(default, float):
                    # NaN has no valid literal form; emit None instead.
                    default = None if math.isnan(default) else default
                    f.write(f'{mark1}\n' + blank + f'"{name}": Float({default})')
                elif isinstance(default, str):
                    f.write(f'{mark1}\n' + blank + f'"{name}": String("{default}")')
                elif isinstance(default, (list, tuple)):
                    # Sequence defaults (e.g. Adam's betas) become a literal list.
                    f.write(f'{mark1}\n' + blank + f'"{name}": [')
                    for k, item in enumerate(default):
                        mark2 = ',' if k != 0 else ''
                        if isinstance(item, bool):
                            v = f'Bool({item})'
                        elif isinstance(item, int):
                            v = f'Integer({item})'
                        elif isinstance(item, float):
                            item = None if math.isnan(item) else item
                            v = f'Float({item})'
                        elif isinstance(item, str):
                            v = f'String("{item}")'
                        else:
                            v = f'Any({item})'
                        f.write(f'{mark2}\n' + blank + '    ' + v)
                    f.write(f'{mark1}\n' + blank + '    ' + ']')
                elif default is None:
                    f.write(f'{mark1}\n' + blank + f'"{name}": All(None)')
                else:
                    # No default at all -> the caller must supply a value.
                    f.write(f'{mark1}\n' + blank + f'"{name}": ' + 'All("No default value")')
                    required_params.append(name)
                    print(f"{name}, {default}")
                    pass
                whole_params.append(name)
            if len(whole_params) != 0:
                mark2 = ',' if len(whole_params) > 0 else ''
                f.write(f'{mark2}\n' + blank + '"__rule__": [')
                if len(required_params) > 0:
                    f.write("Required(")
                    for j, name in enumerate(required_params):
                        mark3 = ', ' if j > 0 else ''
                        f.write(f'{mark3}"{name}"')
                    f.write(")")
                optional_params = list(set(whole_params) - set(required_params))
                for j, name in enumerate(optional_params):
                    mark3 = ', ' if len(required_params) > 0 or j > 0 else ''
                    f.write(f'{mark3}Optional("{name}")')
                f.write(']')
            blank = blank[:-4]
            f.write('\n' + blank + '}')
        f.write('\n}\n')
def gen_torch_lr_scheduler_dict(out_path: str):
    """Write ``lr_scheduler.py``: a checker-descriptor dict for the classes
    in ``torch.optim.lr_scheduler`` (private names and the ``Optimizer`` /
    ``ChainedScheduler`` / ``Counter`` helpers are excluded).  Arguments
    without a default become ``Required``; the ``optimizer`` argument is
    omitted because it cannot be configured from a config file.
    """
    methods = [getattr(lr_scheduler, name) for name in dir(lr_scheduler) if isinstance(getattr(lr_scheduler, name), type) and '_' not in name and name not in ['Optimizer', 'ChainedScheduler', 'Counter']]
    # `blank` tracks the current indentation of the generated file.
    blank = ''
    with open(out_path, 'w') as f:
        f.write('from common.checker.x_types import String, Bool, Integer, Float, Any, All\n')
        f.write('from common.checker.qualifiers import OneOf, SomeOf, RepeatableSomeOf, Required, Optional\n')
        f.write('\n\n')
        f.write('lr_scheduler = {')
        blank += '    '
        for i, method in enumerate(methods):
            # print(method.__name__)
            # print(inspect.getfullargspec(method))
            # print(inspect.signature(method).parameters)
            mark0 = ',' if i > 0 else ''
            f.write(f'{mark0}\n' + blank + f'"{method.__name__}": ' + '{')
            blank += '    '
            params = list(inspect.signature(method).parameters.values())
            required_params = []
            whole_params = []
            for j, param in enumerate(params):
                name = param.name
                default = param.default
                # j == 0 is the skipped `optimizer` argument, so the first
                # entry actually emitted (j == 1) gets no leading comma.
                mark1 = ',' if j > 1 else ''
                # Don't support optimizer
                if name == 'optimizer':
                    continue
                # bool must be tested before int (bool is an int subclass).
                if isinstance(default, bool):
                    f.write(f'{mark1}\n' + blank + f'"{name}": Bool({default})')
                elif isinstance(default, int):
                    f.write(f'{mark1}\n' + blank + f'"{name}": Integer({default})')
                elif isinstance(default, float):
                    # NaN has no valid literal form; emit None instead.
                    default = None if math.isnan(default) else default
                    f.write(f'{mark1}\n' + blank + f'"{name}": Float({default})')
                elif isinstance(default, str):
                    f.write(f'{mark1}\n' + blank + f'"{name}": String("{default}")')
                elif isinstance(default, (list, tuple)):
                    f.write(f'{mark1}\n' + blank + f'"{name}": [')
                    for k, item in enumerate(default):
                        mark2 = ',' if k != 0 else ''
                        if isinstance(item, bool):
                            v = f'Bool({item})'
                        elif isinstance(item, int):
                            v = f'Integer({item})'
                        elif isinstance(item, float):
                            item = None if math.isnan(item) else item
                            v = f'Float({item})'
                        elif isinstance(item, str):
                            v = f'String("{item}")'
                        else:
                            v = f'Any({item})'
                        f.write(f'{mark2}\n' + blank + '    ' + v)
                    f.write(f'{mark1}\n' + blank + '    ' + ']')
                elif default is None:
                    f.write(f'{mark1}\n' + blank + f'"{name}": All(None)')
                else:
                    # No default at all -> the caller must supply a value.
                    f.write(f'{mark1}\n' + blank + f'"{name}": ' + 'All("No default value")')
                    required_params.append(name)
                    print(f"{name}, {default}")
                    pass
                whole_params.append(name)
            if len(whole_params) != 0:
                mark2 = ',' if len(whole_params) > 0 else ''
                f.write(f'{mark2}\n' + blank + '"__rule__": [')
                if len(required_params) > 0:
                    f.write("Required(")
                    for j, name in enumerate(required_params):
                        mark3 = ', ' if j > 0 else ''
                        f.write(f'{mark3}"{name}"')
                    f.write(")")
                optional_params = list(set(whole_params) - set(required_params))
                for j, name in enumerate(optional_params):
                    mark3 = ', ' if len(required_params) > 0 or j > 0 else ''
                    f.write(f'{mark3}Optional("{name}")')
                f.write(']')
            blank = blank[:-4]
            f.write('\n' + blank + '}')
        f.write('\n}\n')
def gen_torch_lossfunc_dict(out_path: str):
    """Write ``lossfunc.py``: a checker-descriptor dict for every ``torch.nn``
    class whose name contains ``'Loss'``, built from its constructor
    signature.  Arguments without a default become ``Required`` entries in
    ``__rule__``; everything else ``Optional``.
    """
    methods = [getattr(nn, name) for name in dir(nn) if isinstance(getattr(nn, name), type) and 'Loss' in name]
    # `blank` tracks the current indentation of the generated file.
    blank = ''
    with open(out_path, 'w') as f:
        f.write('from common.checker.x_types import String, Bool, Integer, Float, Any, All\n')
        f.write('from common.checker.qualifiers import OneOf, SomeOf, RepeatableSomeOf, Required, Optional\n')
        f.write('\n\n')
        f.write('lossfunc = {')
        blank += '    '
        for i, method in enumerate(methods):
            # print(method.__name__)
            # print(inspect.getfullargspec(method))
            # print(inspect.signature(method).parameters)
            mark0 = ',' if i > 0 else ''
            f.write(f'{mark0}\n' + blank + f'"{method.__name__}": ' + '{')
            blank += '    '
            params = list(inspect.signature(method).parameters.values())
            required_params = []
            whole_params = []
            for j, param in enumerate(params):
                name = param.name
                default = param.default
                # No parameter is skipped here, so j == 0 is the first entry.
                mark1 = ',' if j > 0 else ''
                # Don't support params
                # if name == 'optimizer':
                #     continue
                # bool must be tested before int (bool is an int subclass).
                if isinstance(default, bool):
                    f.write(f'{mark1}\n' + blank + f'"{name}": Bool({default})')
                elif isinstance(default, int):
                    f.write(f'{mark1}\n' + blank + f'"{name}": Integer({default})')
                elif isinstance(default, float):
                    # NaN has no valid literal form; emit None instead.
                    default = None if math.isnan(default) else default
                    f.write(f'{mark1}\n' + blank + f'"{name}": Float({default})')
                elif isinstance(default, str):
                    f.write(f'{mark1}\n' + blank + f'"{name}": String("{default}")')
                elif isinstance(default, (list, tuple)):
                    f.write(f'{mark1}\n' + blank + f'"{name}": [')
                    for k, item in enumerate(default):
                        mark2 = ',' if k != 0 else ''
                        if isinstance(item, bool):
                            v = f'Bool({item})'
                        elif isinstance(item, int):
                            v = f'Integer({item})'
                        elif isinstance(item, float):
                            item = None if math.isnan(item) else item
                            v = f'Float({item})'
                        elif isinstance(item, str):
                            v = f'String("{item}")'
                        else:
                            v = f'Any({item})'
                        f.write(f'{mark2}\n' + blank + '    ' + v)
                    f.write(f'{mark1}\n' + blank + '    ' + ']')
                elif default is None:
                    f.write(f'{mark1}\n' + blank + f'"{name}": All(None)')
                else:
                    # No default at all -> the caller must supply a value.
                    f.write(f'{mark1}\n' + blank + f'"{name}": ' + 'All("No default value")')
                    required_params.append(name)
                    print(f"{name}, {default}")
                    pass
                whole_params.append(name)
            if len(whole_params) != 0:
                mark2 = ',' if len(whole_params) > 0 else ''
                f.write(f'{mark2}\n' + blank + '"__rule__": [')
                if len(required_params) > 0:
                    f.write("Required(")
                    for j, name in enumerate(required_params):
                        mark3 = ', ' if j > 0 else ''
                        f.write(f'{mark3}"{name}"')
                    f.write(")")
                optional_params = list(set(whole_params) - set(required_params))
                for j, name in enumerate(optional_params):
                    mark3 = ', ' if len(required_params) > 0 or j > 0 else ''
                    f.write(f'{mark3}Optional("{name}")')
                f.write(']')
            blank = blank[:-4]
            f.write('\n' + blank + '}')
        f.write('\n}\n')
def gen_metric_dict(out_path: str):
    """Write ``metrics.py``: a checker-descriptor dict for the sklearn metric
    functions whose two leading positional parameters look like a
    (truth, prediction)-style pair, plus the project's custom metrics from
    ``algorithm.core.metrics``.

    Entries are keyed by the short alias from
    ``algorithm.core.metrics.metric_dict`` when one exists (e.g. "acc",
    "mse", "auc"), otherwise by the function's own name.  Only parameters
    after the two mandatory data arguments are described; parameters without
    a default become ``Required`` entries in ``__rule__``.
    """
    candidate_methods_name = dir(sklearn_metrics)
    # Keep only functions whose first two parameters match a known
    # (input, target)-style pair; everything else (plot helpers, scorer
    # factories, ...) is skipped.
    valid_combination = [('y_true', 'y_pred'), ('X', 'Y'), ('y_true', 'y_score'), ('X', 'labels'), ('labels_true', 'labels_pred'), ('x', 'y'), ('y_true', 'y_prob'), ('a', 'b')]
    methods = []
    for name in candidate_methods_name:
        method = getattr(sklearn_metrics, name)
        if inspect.isfunction(method):
            params = list(inspect.signature(method).parameters.keys())
            if len(params) >= 2:
                if (params[0], params[1]) in valid_combination:
                    methods.append(name)
    methods = [getattr(sklearn_metrics, name) for name in methods]
    custom_methods = []
    for name in dir(custom_metrics):
        method = getattr(custom_metrics, name)
        if inspect.isfunction(method):
            if name not in ["get_metric"]:
                custom_methods.append(name)
    custom_methods = [getattr(custom_metrics, name) for name in custom_methods]
    # metric_dict maps alias -> function name; invert it so entries can be
    # renamed to their alias in the generated file.
    names_map = {v: k for k, v in metric_dict.items()}
    # `blank` tracks the current indentation of the generated file.
    blank = ''
    with open(out_path, 'w') as f:
        f.write('from common.checker.x_types import String, Bool, Integer, Float, Any, All\n')
        f.write('from common.checker.qualifiers import OneOf, SomeOf, RepeatableSomeOf, Required, Optional\n')
        f.write('\n\n')
        f.write('metrics = {')
        blank += '    '
        for i, method in enumerate(methods + custom_methods):
            mark0 = ',' if i > 0 else ''
            if method.__name__ in names_map:
                f.write(f'{mark0}\n' + blank + f'"{names_map[method.__name__]}": ' + '{')
            else:
                f.write(f'{mark0}\n' + blank + f'"{method.__name__}": ' + '{')
            blank += '    '
            # Skip the two mandatory data arguments; only the remaining
            # keyword-style options are described.
            params = list(inspect.signature(method).parameters.values())[2:]
            required_params = []
            whole_params = []
            # Parameters may be skipped (e.g. 'kwds'), so track "first entry
            # emitted" explicitly instead of deriving it from the loop index.
            is_first = True
            for j, param in enumerate(params):
                name = param.name
                default = param.default
                if name == 'kwds':
                    continue
                mark1 = ',' if is_first is False else ''
                is_first = False
                # bool must be tested before int (bool is an int subclass).
                if isinstance(default, bool):
                    f.write(f'{mark1}\n' + blank + f'"{name}": Bool({default})')
                elif isinstance(default, int):
                    f.write(f'{mark1}\n' + blank + f'"{name}": Integer({default})')
                elif isinstance(default, float):
                    # NaN has no valid literal form; emit None instead.
                    default = None if math.isnan(default) else default
                    f.write(f'{mark1}\n' + blank + f'"{name}": Float({default})')
                elif isinstance(default, str):
                    f.write(f'{mark1}\n' + blank + f'"{name}": String("{default}")')
                elif isinstance(default, (list, tuple)):
                    f.write(f'{mark1}\n' + blank + f'"{name}": [')
                    for k, item in enumerate(default):
                        mark2 = ',' if k != 0 else ''
                        if isinstance(item, bool):
                            v = f'Bool({item})'
                        elif isinstance(item, int):
                            v = f'Integer({item})'
                        elif isinstance(item, float):
                            # Fix: the NaN guard belongs here (floats), not in
                            # the int branch where math.isnan is always False —
                            # consistent with the other generators above.
                            item = None if math.isnan(item) else item
                            v = f'Float({item})'
                        elif isinstance(item, str):
                            v = f'String("{item}")'
                        else:
                            v = f'Any({item})'
                        f.write(f'{mark2}\n' + blank + '    ' + v)
                    f.write(f'{mark1}\n' + blank + '    ' + ']')
                elif default is None:
                    f.write(f'{mark1}\n' + blank + f'"{name}": All(None)')
                else:
                    # No default at all -> the caller must supply a value.
                    f.write(f'{mark1}\n' + blank + f'"{name}": ' + 'All("No default value")')
                    required_params.append(name)
                    print(f"{name}, {default}")
                whole_params.append(name)
            if len(whole_params) != 0:
                mark2 = ',' if len(whole_params) > 0 else ''
                f.write(f'{mark2}\n' + blank + '"__rule__": [')
                if len(required_params) > 0:
                    f.write("Required(")
                    for j, name in enumerate(required_params):
                        mark3 = ', ' if j > 0 else ''
                        f.write(f'{mark3}"{name}"')
                    f.write(")")
                optional_params = list(set(whole_params) - set(required_params))
                for j, name in enumerate(optional_params):
                    mark3 = ', ' if len(required_params) > 0 or j > 0 else ''
                    f.write(f'{mark3}Optional("{name}")')
                f.write(']')
            blank = blank[:-4]
            f.write('\n' + blank + '}')
        f.write('\n}\n')
if __name__ == "__main__":
    from pathlib import Path

    # Regenerate every descriptor module next to this script, in the same
    # order as before: optimizer, lr_scheduler, lossfunc, metrics.
    target_dir = Path(__file__).parent
    generators = (
        ('optimizer.py', gen_torch_optim_dict),
        ('lr_scheduler.py', gen_torch_lr_scheduler_dict),
        ('lossfunc.py', gen_torch_lossfunc_dict),
        ('metrics.py', gen_metric_dict),
    )
    for filename, generator in generators:
        generator(target_dir / filename)
| 20,063 | 41.780384 | 203 | py |
XFL | XFL-master/python/common/utils/auto_descriptor/torch/optimizer.py | from common.checker.x_types import String, Bool, Integer, Float, Any, All
from common.checker.qualifiers import OneOf, SomeOf, RepeatableSomeOf, Required, Optional
# Auto-generated descriptor of the torch.optim optimizer constructors
# (written by gen_torch_optim_dict in torch_descriptor.py — regenerate
# there rather than editing by hand).  Each entry maps an optimizer class
# name to its keyword arguments (the `params` argument is omitted) wrapped
# in checker types, plus a "__rule__" list marking Required/Optional args.
optimizer = {
    "ASGD": {
        "lr": Float(0.01),
        "lambd": Float(0.0001),
        "alpha": Float(0.75),
        "t0": Float(1000000.0),
        "weight_decay": Integer(0),
        "__rule__": [Optional("lr"), Optional("alpha"), Optional("t0"), Optional("lambd"), Optional("weight_decay")]
    },
    "Adadelta": {
        "lr": Float(1.0),
        "rho": Float(0.9),
        "eps": Float(1e-06),
        "weight_decay": Integer(0),
        "__rule__": [Optional("lr"), Optional("weight_decay"), Optional("rho"), Optional("eps")]
    },
    "Adagrad": {
        "lr": Float(0.01),
        "lr_decay": Integer(0),
        "weight_decay": Integer(0),
        "initial_accumulator_value": Integer(0),
        "eps": Float(1e-10),
        "__rule__": [Optional("lr"), Optional("weight_decay"), Optional("initial_accumulator_value"), Optional("lr_decay"), Optional("eps")]
    },
    "Adam": {
        "lr": Float(0.001),
        "betas": [
            Float(0.9),
            Float(0.999),
        ],
        "eps": Float(1e-08),
        "weight_decay": Integer(0),
        "amsgrad": Bool(False),
        "maximize": Bool(False),
        "__rule__": [Optional("lr"), Optional("eps"), Optional("maximize"), Optional("betas"), Optional("amsgrad"), Optional("weight_decay")]
    },
    "AdamW": {
        "lr": Float(0.001),
        "betas": [
            Float(0.9),
            Float(0.999),
        ],
        "eps": Float(1e-08),
        "weight_decay": Float(0.01),
        "amsgrad": Bool(False),
        "maximize": Bool(False),
        "__rule__": [Optional("lr"), Optional("eps"), Optional("maximize"), Optional("betas"), Optional("amsgrad"), Optional("weight_decay")]
    },
    "Adamax": {
        "lr": Float(0.002),
        "betas": [
            Float(0.9),
            Float(0.999),
        ],
        "eps": Float(1e-08),
        "weight_decay": Integer(0),
        "__rule__": [Optional("lr"), Optional("weight_decay"), Optional("eps"), Optional("betas")]
    },
    "LBFGS": {
        "lr": Integer(1),
        "max_iter": Integer(20),
        "max_eval": All(None),
        "tolerance_grad": Float(1e-07),
        "tolerance_change": Float(1e-09),
        "history_size": Integer(100),
        "line_search_fn": All(None),
        "__rule__": [Optional("tolerance_change"), Optional("lr"), Optional("history_size"), Optional("tolerance_grad"), Optional("max_eval"), Optional("line_search_fn"), Optional("max_iter")]
    },
    "NAdam": {
        "lr": Float(0.002),
        "betas": [
            Float(0.9),
            Float(0.999),
        ],
        "eps": Float(1e-08),
        "weight_decay": Integer(0),
        "momentum_decay": Float(0.004),
        "__rule__": [Optional("lr"), Optional("eps"), Optional("betas"), Optional("momentum_decay"), Optional("weight_decay")]
    },
    "RAdam": {
        "lr": Float(0.001),
        "betas": [
            Float(0.9),
            Float(0.999),
        ],
        "eps": Float(1e-08),
        "weight_decay": Integer(0),
        "__rule__": [Optional("lr"), Optional("weight_decay"), Optional("eps"), Optional("betas")]
    },
    "RMSprop": {
        "lr": Float(0.01),
        "alpha": Float(0.99),
        "eps": Float(1e-08),
        "weight_decay": Integer(0),
        "momentum": Integer(0),
        "centered": Bool(False),
        "__rule__": [Optional("lr"), Optional("alpha"), Optional("eps"), Optional("momentum"), Optional("centered"), Optional("weight_decay")]
    },
    "Rprop": {
        "lr": Float(0.01),
        "etas": [
            Float(0.5),
            Float(1.2),
        ],
        "step_sizes": [
            Float(1e-06),
            Integer(50),
        ],
        "__rule__": [Optional("lr"), Optional("etas"), Optional("step_sizes")]
    },
    "SGD": {
        "lr": All("No default value"),
        "momentum": Integer(0),
        "dampening": Integer(0),
        "weight_decay": Integer(0),
        "nesterov": Bool(False),
        "maximize": Bool(False),
        "__rule__": [Required("lr"), Optional("dampening"), Optional("nesterov"), Optional("maximize"), Optional("weight_decay"), Optional("momentum")]
    },
    "SparseAdam": {
        "lr": Float(0.001),
        "betas": [
            Float(0.9),
            Float(0.999),
        ],
        "eps": Float(1e-08),
        "__rule__": [Optional("lr"), Optional("betas"), Optional("eps")]
    }
}
| 4,627 | 33.537313 | 192 | py |
XFL | XFL-master/python/common/model/__init__.py | 0 | 0 | 0 | py | |
XFL | XFL-master/python/common/model/python/tree_model_pb2.py | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: tree_model.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='tree_model.proto',
package='model',
syntax='proto3',
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x10tree_model.proto\x12\x05model\"\x84\x01\n\tSplitInfo\x12\x10\n\x08owner_id\x18\x01 \x01(\t\x12\x13\n\x0b\x66\x65\x61ture_idx\x18\x02 \x01(\x05\x12\x14\n\x0c\x66\x65\x61ture_name\x18\x03 \x01(\t\x12\x13\n\x0bis_category\x18\x04 \x01(\x08\x12\x13\n\x0bsplit_point\x18\x05 \x01(\x01\x12\x10\n\x08left_cat\x18\x06 \x03(\x01\"\xa6\x01\n\x04Node\x12\n\n\x02id\x18\x01 \x01(\t\x12\r\n\x05\x64\x65pth\x18\x02 \x01(\x05\x12\x14\n\x0cleft_node_id\x18\x03 \x01(\t\x12\x15\n\rright_node_id\x18\x04 \x01(\t\x12$\n\nsplit_info\x18\x05 \x01(\x0b\x32\x10.model.SplitInfo\x12\x0f\n\x07is_leaf\x18\x06 \x01(\x08\x12\x0e\n\x06weight\x18\x07 \x01(\x01\x12\x0f\n\x07linkage\x18\x08 \x01(\t\"\xa4\x01\n\x04Tree\x12\x10\n\x08party_id\x18\x01 \x01(\t\x12\x12\n\ntree_index\x18\x02 \x01(\x05\x12\x14\n\x0croot_node_id\x18\x03 \x01(\t\x12%\n\x05nodes\x18\x04 \x03(\x0b\x32\x16.model.Tree.NodesEntry\x1a\x39\n\nNodesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x1a\n\x05value\x18\x02 \x01(\x0b\x32\x0b.model.Node:\x02\x38\x01\"\"\n\nNodeIdList\x12\x14\n\x0cnode_id_list\x18\x01 \x03(\t\"\xa1\x02\n\x0cXGBoostModel\x12\x19\n\x11suggest_threshold\x18\x01 \x01(\x01\x12\n\n\x02lr\x18\x02 \x03(\x01\x12\x11\n\tmax_depth\x18\x03 \x03(\x05\x12\x1a\n\x05trees\x18\x04 \x03(\x0b\x32\x0b.model.Tree\x12\x0f\n\x07version\x18\x05 \x01(\t\x12\x13\n\x0bloss_method\x18\x06 \x01(\t\x12\x11\n\tnum_trees\x18\x07 \x01(\x05\x12;\n\rnode_id_group\x18\x08 \x03(\x0b\x32$.model.XGBoostModel.NodeIdGroupEntry\x1a\x45\n\x10NodeIdGroupEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12 \n\x05value\x18\x02 \x01(\x0b\x32\x11.model.NodeIdList:\x02\x38\x01\"r\n\tNodeModel\x12*\n\x05nodes\x18\x01 \x03(\x0b\x32\x1b.model.NodeModel.NodesEntry\x1a\x39\n\nNodesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x1a\n\x05value\x18\x02 \x01(\x0b\x32\x0b.model.Node:\x02\x38\x01\x62\x06proto3'
)
_SPLITINFO = _descriptor.Descriptor(
name='SplitInfo',
full_name='model.SplitInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='owner_id', full_name='model.SplitInfo.owner_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='feature_idx', full_name='model.SplitInfo.feature_idx', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='feature_name', full_name='model.SplitInfo.feature_name', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='is_category', full_name='model.SplitInfo.is_category', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='split_point', full_name='model.SplitInfo.split_point', index=4,
number=5, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='left_cat', full_name='model.SplitInfo.left_cat', index=5,
number=6, type=1, cpp_type=5, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=28,
serialized_end=160,
)
_NODE = _descriptor.Descriptor(
name='Node',
full_name='model.Node',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='model.Node.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='depth', full_name='model.Node.depth', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='left_node_id', full_name='model.Node.left_node_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='right_node_id', full_name='model.Node.right_node_id', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='split_info', full_name='model.Node.split_info', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='is_leaf', full_name='model.Node.is_leaf', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='weight', full_name='model.Node.weight', index=6,
number=7, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='linkage', full_name='model.Node.linkage', index=7,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=163,
serialized_end=329,
)
_TREE_NODESENTRY = _descriptor.Descriptor(
name='NodesEntry',
full_name='model.Tree.NodesEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='model.Tree.NodesEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='model.Tree.NodesEntry.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=439,
serialized_end=496,
)
_TREE = _descriptor.Descriptor(
name='Tree',
full_name='model.Tree',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='party_id', full_name='model.Tree.party_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='tree_index', full_name='model.Tree.tree_index', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='root_node_id', full_name='model.Tree.root_node_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='nodes', full_name='model.Tree.nodes', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[_TREE_NODESENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=332,
serialized_end=496,
)
_NODEIDLIST = _descriptor.Descriptor(
name='NodeIdList',
full_name='model.NodeIdList',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='node_id_list', full_name='model.NodeIdList.node_id_list', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=498,
serialized_end=532,
)
_XGBOOSTMODEL_NODEIDGROUPENTRY = _descriptor.Descriptor(
name='NodeIdGroupEntry',
full_name='model.XGBoostModel.NodeIdGroupEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='model.XGBoostModel.NodeIdGroupEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='model.XGBoostModel.NodeIdGroupEntry.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=755,
serialized_end=824,
)
_XGBOOSTMODEL = _descriptor.Descriptor(
name='XGBoostModel',
full_name='model.XGBoostModel',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='suggest_threshold', full_name='model.XGBoostModel.suggest_threshold', index=0,
number=1, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='lr', full_name='model.XGBoostModel.lr', index=1,
number=2, type=1, cpp_type=5, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='max_depth', full_name='model.XGBoostModel.max_depth', index=2,
number=3, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='trees', full_name='model.XGBoostModel.trees', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='version', full_name='model.XGBoostModel.version', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='loss_method', full_name='model.XGBoostModel.loss_method', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='num_trees', full_name='model.XGBoostModel.num_trees', index=6,
number=7, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='node_id_group', full_name='model.XGBoostModel.node_id_group', index=7,
number=8, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[_XGBOOSTMODEL_NODEIDGROUPENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=535,
serialized_end=824,
)
_NODEMODEL_NODESENTRY = _descriptor.Descriptor(
name='NodesEntry',
full_name='model.NodeModel.NodesEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='model.NodeModel.NodesEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='model.NodeModel.NodesEntry.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=439,
serialized_end=496,
)
_NODEMODEL = _descriptor.Descriptor(
name='NodeModel',
full_name='model.NodeModel',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='nodes', full_name='model.NodeModel.nodes', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[_NODEMODEL_NODESENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=826,
serialized_end=940,
)
_NODE.fields_by_name['split_info'].message_type = _SPLITINFO
_TREE_NODESENTRY.fields_by_name['value'].message_type = _NODE
_TREE_NODESENTRY.containing_type = _TREE
_TREE.fields_by_name['nodes'].message_type = _TREE_NODESENTRY
_XGBOOSTMODEL_NODEIDGROUPENTRY.fields_by_name['value'].message_type = _NODEIDLIST
_XGBOOSTMODEL_NODEIDGROUPENTRY.containing_type = _XGBOOSTMODEL
_XGBOOSTMODEL.fields_by_name['trees'].message_type = _TREE
_XGBOOSTMODEL.fields_by_name['node_id_group'].message_type = _XGBOOSTMODEL_NODEIDGROUPENTRY
_NODEMODEL_NODESENTRY.fields_by_name['value'].message_type = _NODE
_NODEMODEL_NODESENTRY.containing_type = _NODEMODEL
_NODEMODEL.fields_by_name['nodes'].message_type = _NODEMODEL_NODESENTRY
DESCRIPTOR.message_types_by_name['SplitInfo'] = _SPLITINFO
DESCRIPTOR.message_types_by_name['Node'] = _NODE
DESCRIPTOR.message_types_by_name['Tree'] = _TREE
DESCRIPTOR.message_types_by_name['NodeIdList'] = _NODEIDLIST
DESCRIPTOR.message_types_by_name['XGBoostModel'] = _XGBOOSTMODEL
DESCRIPTOR.message_types_by_name['NodeModel'] = _NODEMODEL
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
SplitInfo = _reflection.GeneratedProtocolMessageType('SplitInfo', (_message.Message,), {
'DESCRIPTOR' : _SPLITINFO,
'__module__' : 'tree_model_pb2'
# @@protoc_insertion_point(class_scope:model.SplitInfo)
})
_sym_db.RegisterMessage(SplitInfo)
Node = _reflection.GeneratedProtocolMessageType('Node', (_message.Message,), {
'DESCRIPTOR' : _NODE,
'__module__' : 'tree_model_pb2'
# @@protoc_insertion_point(class_scope:model.Node)
})
_sym_db.RegisterMessage(Node)
Tree = _reflection.GeneratedProtocolMessageType('Tree', (_message.Message,), {
'NodesEntry' : _reflection.GeneratedProtocolMessageType('NodesEntry', (_message.Message,), {
'DESCRIPTOR' : _TREE_NODESENTRY,
'__module__' : 'tree_model_pb2'
# @@protoc_insertion_point(class_scope:model.Tree.NodesEntry)
})
,
'DESCRIPTOR' : _TREE,
'__module__' : 'tree_model_pb2'
# @@protoc_insertion_point(class_scope:model.Tree)
})
_sym_db.RegisterMessage(Tree)
_sym_db.RegisterMessage(Tree.NodesEntry)
NodeIdList = _reflection.GeneratedProtocolMessageType('NodeIdList', (_message.Message,), {
'DESCRIPTOR' : _NODEIDLIST,
'__module__' : 'tree_model_pb2'
# @@protoc_insertion_point(class_scope:model.NodeIdList)
})
_sym_db.RegisterMessage(NodeIdList)
XGBoostModel = _reflection.GeneratedProtocolMessageType('XGBoostModel', (_message.Message,), {
'NodeIdGroupEntry' : _reflection.GeneratedProtocolMessageType('NodeIdGroupEntry', (_message.Message,), {
'DESCRIPTOR' : _XGBOOSTMODEL_NODEIDGROUPENTRY,
'__module__' : 'tree_model_pb2'
# @@protoc_insertion_point(class_scope:model.XGBoostModel.NodeIdGroupEntry)
})
,
'DESCRIPTOR' : _XGBOOSTMODEL,
'__module__' : 'tree_model_pb2'
# @@protoc_insertion_point(class_scope:model.XGBoostModel)
})
_sym_db.RegisterMessage(XGBoostModel)
_sym_db.RegisterMessage(XGBoostModel.NodeIdGroupEntry)
NodeModel = _reflection.GeneratedProtocolMessageType('NodeModel', (_message.Message,), {
'NodesEntry' : _reflection.GeneratedProtocolMessageType('NodesEntry', (_message.Message,), {
'DESCRIPTOR' : _NODEMODEL_NODESENTRY,
'__module__' : 'tree_model_pb2'
# @@protoc_insertion_point(class_scope:model.NodeModel.NodesEntry)
})
,
'DESCRIPTOR' : _NODEMODEL,
'__module__' : 'tree_model_pb2'
# @@protoc_insertion_point(class_scope:model.NodeModel)
})
_sym_db.RegisterMessage(NodeModel)
_sym_db.RegisterMessage(NodeModel.NodesEntry)
_TREE_NODESENTRY._options = None
_XGBOOSTMODEL_NODEIDGROUPENTRY._options = None
_NODEMODEL_NODESENTRY._options = None
# @@protoc_insertion_point(module_scope)
| 24,141 | 40.840555 | 1,841 | py |
XFL | XFL-master/python/common/model/python/feature_model_pb2.py | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: feature_model.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='feature_model.proto',
package='model',
syntax='proto3',
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x13\x66\x65\x61ture_model.proto\x12\x05model\"#\n\x07NaValue\x12\x0b\n\x03ori\x18\x01 \x01(\x01\x12\x0b\n\x03val\x18\x02 \x01(\x01\"Z\n\x07\x42inning\x12\x15\n\rbinning_split\x18\x01 \x03(\x01\x12\x0b\n\x03woe\x18\x02 \x03(\x01\x12\x0f\n\x07\x66\x65\x61ture\x18\x03 \x01(\t\x12\x1a\n\x02na\x18\x04 \x01(\x0b\x32\x0e.model.NaValue\"\x8f\x01\n\x08WOEModel\x12<\n\x0f\x66\x65\x61ture_binning\x18\x01 \x03(\x0b\x32#.model.WOEModel.FeatureBinningEntry\x1a\x45\n\x13\x46\x65\x61tureBinningEntry\x12\x0b\n\x03key\x18\x01 \x01(\x05\x12\x1d\n\x05value\x18\x02 \x01(\x0b\x32\x0e.model.Binning:\x02\x38\x01\"1\n\nNormalizer\x12\x0f\n\x07\x66\x65\x61ture\x18\x01 \x01(\t\x12\x12\n\nnorm_value\x18\x02 \x01(\x01\"7\n\x0eStandardScaler\x12\x0f\n\x07\x66\x65\x61ture\x18\x01 \x01(\t\x12\t\n\x01u\x18\x02 \x01(\x01\x12\t\n\x01s\x18\x03 \x01(\x01\"\xb5\x01\n\x12NormalizationModel\x12\x0c\n\x04\x61xis\x18\x01 \x01(\x05\x12\x0c\n\x04norm\x18\x02 \x01(\t\x12=\n\nnormalizer\x18\x03 \x03(\x0b\x32).model.NormalizationModel.NormalizerEntry\x1a\x44\n\x0fNormalizerEntry\x12\x0b\n\x03key\x18\x01 \x01(\x05\x12 \n\x05value\x18\x02 \x01(\x0b\x32\x11.model.Normalizer:\x02\x38\x01\"\xae\x01\n\x14StandardizationModel\x12H\n\x0fstandard_scaler\x18\x02 \x03(\x0b\x32/.model.StandardizationModel.StandardScalerEntry\x1aL\n\x13StandardScalerEntry\x12\x0b\n\x03key\x18\x01 \x01(\x05\x12$\n\x05value\x18\x02 \x01(\x0b\x32\x15.model.StandardScaler:\x02\x38\x01\x62\x06proto3'
)
_NAVALUE = _descriptor.Descriptor(
name='NaValue',
full_name='model.NaValue',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='ori', full_name='model.NaValue.ori', index=0,
number=1, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='val', full_name='model.NaValue.val', index=1,
number=2, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=30,
serialized_end=65,
)
_BINNING = _descriptor.Descriptor(
name='Binning',
full_name='model.Binning',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='binning_split', full_name='model.Binning.binning_split', index=0,
number=1, type=1, cpp_type=5, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='woe', full_name='model.Binning.woe', index=1,
number=2, type=1, cpp_type=5, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='feature', full_name='model.Binning.feature', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='na', full_name='model.Binning.na', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=67,
serialized_end=157,
)
_WOEMODEL_FEATUREBINNINGENTRY = _descriptor.Descriptor(
name='FeatureBinningEntry',
full_name='model.WOEModel.FeatureBinningEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='model.WOEModel.FeatureBinningEntry.key', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='model.WOEModel.FeatureBinningEntry.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=234,
serialized_end=303,
)
_WOEMODEL = _descriptor.Descriptor(
name='WOEModel',
full_name='model.WOEModel',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='feature_binning', full_name='model.WOEModel.feature_binning', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[_WOEMODEL_FEATUREBINNINGENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=160,
serialized_end=303,
)
_NORMALIZER = _descriptor.Descriptor(
name='Normalizer',
full_name='model.Normalizer',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='feature', full_name='model.Normalizer.feature', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='norm_value', full_name='model.Normalizer.norm_value', index=1,
number=2, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=305,
serialized_end=354,
)
_STANDARDSCALER = _descriptor.Descriptor(
name='StandardScaler',
full_name='model.StandardScaler',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='feature', full_name='model.StandardScaler.feature', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='u', full_name='model.StandardScaler.u', index=1,
number=2, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='s', full_name='model.StandardScaler.s', index=2,
number=3, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=356,
serialized_end=411,
)
_NORMALIZATIONMODEL_NORMALIZERENTRY = _descriptor.Descriptor(
name='NormalizerEntry',
full_name='model.NormalizationModel.NormalizerEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='model.NormalizationModel.NormalizerEntry.key', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='model.NormalizationModel.NormalizerEntry.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=527,
serialized_end=595,
)
_NORMALIZATIONMODEL = _descriptor.Descriptor(
name='NormalizationModel',
full_name='model.NormalizationModel',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='axis', full_name='model.NormalizationModel.axis', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='norm', full_name='model.NormalizationModel.norm', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='normalizer', full_name='model.NormalizationModel.normalizer', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[_NORMALIZATIONMODEL_NORMALIZERENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=414,
serialized_end=595,
)
_STANDARDIZATIONMODEL_STANDARDSCALERENTRY = _descriptor.Descriptor(
name='StandardScalerEntry',
full_name='model.StandardizationModel.StandardScalerEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='model.StandardizationModel.StandardScalerEntry.key', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='model.StandardizationModel.StandardScalerEntry.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=696,
serialized_end=772,
)
_STANDARDIZATIONMODEL = _descriptor.Descriptor(
name='StandardizationModel',
full_name='model.StandardizationModel',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='standard_scaler', full_name='model.StandardizationModel.standard_scaler', index=0,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[_STANDARDIZATIONMODEL_STANDARDSCALERENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=598,
serialized_end=772,
)
_BINNING.fields_by_name['na'].message_type = _NAVALUE
_WOEMODEL_FEATUREBINNINGENTRY.fields_by_name['value'].message_type = _BINNING
_WOEMODEL_FEATUREBINNINGENTRY.containing_type = _WOEMODEL
_WOEMODEL.fields_by_name['feature_binning'].message_type = _WOEMODEL_FEATUREBINNINGENTRY
_NORMALIZATIONMODEL_NORMALIZERENTRY.fields_by_name['value'].message_type = _NORMALIZER
_NORMALIZATIONMODEL_NORMALIZERENTRY.containing_type = _NORMALIZATIONMODEL
_NORMALIZATIONMODEL.fields_by_name['normalizer'].message_type = _NORMALIZATIONMODEL_NORMALIZERENTRY
_STANDARDIZATIONMODEL_STANDARDSCALERENTRY.fields_by_name['value'].message_type = _STANDARDSCALER
_STANDARDIZATIONMODEL_STANDARDSCALERENTRY.containing_type = _STANDARDIZATIONMODEL
_STANDARDIZATIONMODEL.fields_by_name['standard_scaler'].message_type = _STANDARDIZATIONMODEL_STANDARDSCALERENTRY
DESCRIPTOR.message_types_by_name['NaValue'] = _NAVALUE
DESCRIPTOR.message_types_by_name['Binning'] = _BINNING
DESCRIPTOR.message_types_by_name['WOEModel'] = _WOEMODEL
DESCRIPTOR.message_types_by_name['Normalizer'] = _NORMALIZER
DESCRIPTOR.message_types_by_name['StandardScaler'] = _STANDARDSCALER
DESCRIPTOR.message_types_by_name['NormalizationModel'] = _NORMALIZATIONMODEL
DESCRIPTOR.message_types_by_name['StandardizationModel'] = _STANDARDIZATIONMODEL
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
NaValue = _reflection.GeneratedProtocolMessageType('NaValue', (_message.Message,), {
'DESCRIPTOR' : _NAVALUE,
'__module__' : 'feature_model_pb2'
# @@protoc_insertion_point(class_scope:model.NaValue)
})
_sym_db.RegisterMessage(NaValue)
Binning = _reflection.GeneratedProtocolMessageType('Binning', (_message.Message,), {
'DESCRIPTOR' : _BINNING,
'__module__' : 'feature_model_pb2'
# @@protoc_insertion_point(class_scope:model.Binning)
})
_sym_db.RegisterMessage(Binning)
WOEModel = _reflection.GeneratedProtocolMessageType('WOEModel', (_message.Message,), {
'FeatureBinningEntry' : _reflection.GeneratedProtocolMessageType('FeatureBinningEntry', (_message.Message,), {
'DESCRIPTOR' : _WOEMODEL_FEATUREBINNINGENTRY,
'__module__' : 'feature_model_pb2'
# @@protoc_insertion_point(class_scope:model.WOEModel.FeatureBinningEntry)
})
,
'DESCRIPTOR' : _WOEMODEL,
'__module__' : 'feature_model_pb2'
# @@protoc_insertion_point(class_scope:model.WOEModel)
})
_sym_db.RegisterMessage(WOEModel)
_sym_db.RegisterMessage(WOEModel.FeatureBinningEntry)
Normalizer = _reflection.GeneratedProtocolMessageType('Normalizer', (_message.Message,), {
'DESCRIPTOR' : _NORMALIZER,
'__module__' : 'feature_model_pb2'
# @@protoc_insertion_point(class_scope:model.Normalizer)
})
_sym_db.RegisterMessage(Normalizer)
StandardScaler = _reflection.GeneratedProtocolMessageType('StandardScaler', (_message.Message,), {
'DESCRIPTOR' : _STANDARDSCALER,
'__module__' : 'feature_model_pb2'
# @@protoc_insertion_point(class_scope:model.StandardScaler)
})
_sym_db.RegisterMessage(StandardScaler)
NormalizationModel = _reflection.GeneratedProtocolMessageType('NormalizationModel', (_message.Message,), {
'NormalizerEntry' : _reflection.GeneratedProtocolMessageType('NormalizerEntry', (_message.Message,), {
'DESCRIPTOR' : _NORMALIZATIONMODEL_NORMALIZERENTRY,
'__module__' : 'feature_model_pb2'
# @@protoc_insertion_point(class_scope:model.NormalizationModel.NormalizerEntry)
})
,
'DESCRIPTOR' : _NORMALIZATIONMODEL,
'__module__' : 'feature_model_pb2'
# @@protoc_insertion_point(class_scope:model.NormalizationModel)
})
_sym_db.RegisterMessage(NormalizationModel)
_sym_db.RegisterMessage(NormalizationModel.NormalizerEntry)
StandardizationModel = _reflection.GeneratedProtocolMessageType('StandardizationModel', (_message.Message,), {
'StandardScalerEntry' : _reflection.GeneratedProtocolMessageType('StandardScalerEntry', (_message.Message,), {
'DESCRIPTOR' : _STANDARDIZATIONMODEL_STANDARDSCALERENTRY,
'__module__' : 'feature_model_pb2'
# @@protoc_insertion_point(class_scope:model.StandardizationModel.StandardScalerEntry)
})
,
'DESCRIPTOR' : _STANDARDIZATIONMODEL,
'__module__' : 'feature_model_pb2'
# @@protoc_insertion_point(class_scope:model.StandardizationModel)
})
_sym_db.RegisterMessage(StandardizationModel)
_sym_db.RegisterMessage(StandardizationModel.StandardScalerEntry)
_WOEMODEL_FEATUREBINNINGENTRY._options = None
_NORMALIZATIONMODEL_NORMALIZERENTRY._options = None
_STANDARDIZATIONMODEL_STANDARDSCALERENTRY._options = None
# @@protoc_insertion_point(module_scope)
| 20,317 | 37.700952 | 1,464 | py |
XFL | XFL-master/python/common/model/python/linear_model_pb2.py | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: linear_model.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='linear_model.proto',
package='model',
syntax='proto3',
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x12linear_model.proto\x12\x05model\")\n\tStateDict\x12\x0e\n\x06weight\x18\x01 \x03(\x01\x12\x0c\n\x04\x62ias\x18\x02 \x01(\x01\"N\n\x0bLinearModel\x12$\n\nstate_dict\x18\x01 \x01(\x0b\x32\x10.model.StateDict\x12\x19\n\x11suggest_threshold\x18\x02 \x01(\x01\x62\x06proto3'
)
_STATEDICT = _descriptor.Descriptor(
name='StateDict',
full_name='model.StateDict',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='weight', full_name='model.StateDict.weight', index=0,
number=1, type=1, cpp_type=5, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='bias', full_name='model.StateDict.bias', index=1,
number=2, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=29,
serialized_end=70,
)
_LINEARMODEL = _descriptor.Descriptor(
name='LinearModel',
full_name='model.LinearModel',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='state_dict', full_name='model.LinearModel.state_dict', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='suggest_threshold', full_name='model.LinearModel.suggest_threshold', index=1,
number=2, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=72,
serialized_end=150,
)
_LINEARMODEL.fields_by_name['state_dict'].message_type = _STATEDICT
DESCRIPTOR.message_types_by_name['StateDict'] = _STATEDICT
DESCRIPTOR.message_types_by_name['LinearModel'] = _LINEARMODEL
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
StateDict = _reflection.GeneratedProtocolMessageType('StateDict', (_message.Message,), {
'DESCRIPTOR' : _STATEDICT,
'__module__' : 'linear_model_pb2'
# @@protoc_insertion_point(class_scope:model.StateDict)
})
_sym_db.RegisterMessage(StateDict)
LinearModel = _reflection.GeneratedProtocolMessageType('LinearModel', (_message.Message,), {
'DESCRIPTOR' : _LINEARMODEL,
'__module__' : 'linear_model_pb2'
# @@protoc_insertion_point(class_scope:model.LinearModel)
})
_sym_db.RegisterMessage(LinearModel)
# @@protoc_insertion_point(module_scope)
| 4,222 | 32.515873 | 293 | py |
XFL | XFL-master/python/common/model/python/__init__.py | 0 | 0 | 0 | py | |
XFL | XFL-master/python/algorithm/__init__.py | 0 | 0 | 0 | py | |
XFL | XFL-master/python/algorithm/config_descriptor/vertical_sampler/label_trainer.py | from common.checker.x_types import String, Bool, Integer, Float, Any
from common.checker.qualifiers import OneOf, SomeOf, RepeatableSomeOf, Required, Optional
vertical_sampler_label_trainer_rule = {
"identity": "label_trainer",
"model_info": {
"name": "vertical_sampler"
},
"input": {
"dataset": [
OneOf(
{
"type": "csv",
"path": String(""),
"name": String(""),
"has_id": Bool(True),
"has_label": Bool(True)
}
).set_default_index(0)
]
},
"output": {
"path": String("/opt/checkpoints/[JOB_ID]/[NODE_ID]"),
"sample_id": {
"name": String("sampled_id_[STAGE_ID].json")
},
"dataset": {
"name": String("sampled_data_[STAGE_ID].csv")
}
},
"train_info": {
"train_params": {
"__rule__": [Optional("marketing_specified"), Required("method", "strategy", "random_seed", "fraction")],
"method": OneOf("random", "stratify").set_default_index(0),
"strategy": OneOf("downsample", "upsample").set_default_index(0),
"random_seed": int(42),
"fraction": {
"__rule__": OneOf("number", "percentage", "labeled_percentage").set_default_index(1),
"number": Integer(),
"percentage": Float(0.4),
"labeled_percentage": [RepeatableSomeOf([Integer(), Float()])]
},
"marketing_specified": {
"threshold_method": OneOf("number", "score", "percentage").set_default_index(2),
"threshold": OneOf(Integer(), Float(0.4)).set_default_index(1)
}
}
}
}
| 1,801 | 35.04 | 117 | py |
XFL | XFL-master/python/algorithm/config_descriptor/vertical_sampler/sync.py | from common.checker.x_types import String, Bool, Integer, Float, Any, All
from common.checker.qualifiers import OneOf, SomeOf, RepeatableSomeOf, Required, Optional
vertical_sampler_sync_rule = {
}
| 203 | 28.142857 | 89 | py |
XFL | XFL-master/python/algorithm/config_descriptor/vertical_sampler/trainer.py | from common.checker.x_types import String, Bool, Integer, Float, Any
from common.checker.qualifiers import OneOf, SomeOf, RepeatableSomeOf, Required, Optional
vertical_sampler_trainer_rule = {
"identity": "trainer",
"model_info": {
"name": "vertical_sampler"
},
"input": {
"dataset": [
OneOf(
{
"type": "csv",
"path": String(""),
"name": String(""),
"has_id": Bool(True),
"has_label": Bool(False)
}
).set_default_index(0)
]
},
"output": {
"path": String("/opt/checkpoints/[JOB_ID]/[NODE_ID]"),
"sample_id": {
"name": String("sampled_id_[STAGE_ID].json")
},
"dataset": {
"name": String("sampled_data_[STAGE_ID].csv")
}
},
"train_info": {
}
}
| 922 | 26.147059 | 89 | py |
XFL | XFL-master/python/algorithm/config_descriptor/vertical_binning_woe_iv/label_trainer.py | from common.checker.x_types import String, Bool, Integer, Float, Any
from common.checker.qualifiers import OneOf, SomeOf, RepeatableSomeOf, Required, Optional
vertical_binning_woe_iv_label_trainer_rule = {
"identity": "label_trainer",
"model_info": {
"name": "vertical_binning_woe_iv"
},
"input": {
"trainset": [
OneOf(
{
"type": "csv",
"path": String(""),
"name": String(""),
"has_id": Bool(True),
"has_label": Bool(True),
"nan_list": [Optional(RepeatableSomeOf(Any()))]
}
).set_default_index(0)
]
},
"output": {
"path": String("/opt/checkpoints/[JOB_ID]/[NODE_ID]"),
"iv": {
"name": String("woe_iv_result_[STAGE_ID].json")
},
"split_points": {
"name": String("binning_split_points_[STAGE_ID].json")
}
},
"train_info": {
"train_params": {
"encryption": {
"__rule__": OneOf("paillier", "plain").set_default_index(0),
"paillier": {
"key_bit_size": OneOf(2048, 4096, 8192).set_default_index(0),
"precision": Optional(Integer(7)).set_default_not_none(),
"djn_on": Bool(True),
"parallelize_on": Bool(True)
},
"plain": {}
},
"binning": {
"method": OneOf("equal_frequency", "equal_width").set_default_index(1),
"bins": Integer(5)
}
}
}
}
| 1,674 | 31.211538 | 89 | py |
XFL | XFL-master/python/algorithm/config_descriptor/vertical_binning_woe_iv/sync.py | from common.checker.x_types import String, Bool, Integer, Float, Any, All
vertical_binning_woe_iv_sync_rule = {
"train_info": All()
}
| 139 | 22.333333 | 73 | py |
XFL | XFL-master/python/algorithm/config_descriptor/vertical_binning_woe_iv/trainer.py | from common.checker.x_types import String, Bool, Integer, Float, Any
from common.checker.qualifiers import OneOf, SomeOf, RepeatableSomeOf, Required, Optional
vertical_binning_woe_iv_trainer_rule = {
"identity": "trainer",
"model_info": {
"name": "vertical_binning_woe_iv"
},
"input": {
"trainset": [
OneOf(
{
"type": "csv",
"path": String(""),
"name": String(""),
"has_id": Bool(True),
"has_label": Bool(False),
"nan_list": [Optional(RepeatableSomeOf(Any()))]
}
).set_default_index(0)
]
},
"output": {
"path": String("/opt/checkpoints/[JOB_ID]/[NODE_ID]"),
"split_points": {
"name": String("binning_split_points_[STAGE_ID].json")
}
},
"train_info": {
"train_params": {
"max_num_cores": Integer(2)
}
}
}
| 1,005 | 27.742857 | 89 | py |
XFL | XFL-master/python/algorithm/config_descriptor/vertical_poisson_regression/label_trainer.py | from common.checker.x_types import String, Bool, Integer, Float, Any
from common.checker.qualifiers import OneOf, SomeOf, RepeatableSomeOf, Required, Optional
vertical_poisson_regression_label_trainer_rule = {
"identity": "label_trainer",
"model_info": {
"name": "vertical_poisson_regression"
},
"input": {
"__rule__": [Optional("pretrained_model"), Required("trainset", "valset")],
"trainset": [
OneOf(
{
"type": "csv",
"path": String(""),
"name": String(""),
"has_id": Bool(True),
"has_label": Bool(True)
}
).set_default_index(0)
],
"valset": [
RepeatableSomeOf(
{
"type": "csv",
"path": String(""),
"name": String(""),
"has_id": Bool(True),
"has_label": Bool(True)
}
).set_default_indices(0)
],
"pretrained_model": {
"path": String(""),
"name": String("")
}
},
"output": {
"path": String("/opt/checkpoints/[JOB_ID]/[NODE_ID]"),
"model": {
"name": String("vertical_poisson_regression_[STAGE_ID].model")
},
"metric_train": {
"name": String("pr_metric_train_[STAGE_ID].csv")
},
"metric_val": {
"name": String("pr_metric_val_[STAGE_ID].csv")
},
"prediction_train": {
"name": String("pr_prediction_train_[STAGE_ID].csv")
},
"prediction_val": {
"name": String("pr_prediction_val_[STAGE_ID].csv")
},
"feature_importance": {
"name": String("pr_feature_importance_[STAGE_ID].csv")
}
},
"train_info": {
"interaction_params": {
"save_frequency": Integer(-1).ge(-1),
"echo_training_metrics": Bool(True),
"write_training_prediction": Bool(True),
"write_validation_prediction": Bool(True)
},
"train_params": {
"global_epoch": Integer(10),
"batch_size": Integer(128),
"encryption": {
"__rule__": OneOf("ckks", "paillier", "plain").set_default("ckks"),
"ckks": {
"poly_modulus_degree": Integer(8192),
"coeff_mod_bit_sizes": [
RepeatableSomeOf(Integer()).set_default([60, 40, 40, 60])
],
"global_scale_bit_size": Integer(40)
},
"paillier": {
"key_bit_size": OneOf(2048, 4096, 8192).set_default_index(0),
"precision": Optional(Integer(7).ge(1)).set_default_not_none(),
"djn_on": Bool(True),
"parallelize_on": Bool(True)
},
"plain": {}
},
"optimizer": {
"lr": Float(0.01),
"p": OneOf(0, 1, 2).set_default(2),
"alpha": Float(1e-4)
},
"metric": {
"mse": {},
"mape": {},
"mae": {},
"rmse": {}
},
"early_stopping": {
"key": OneOf("mse", "mape", "mae", "rmse", "loss").set_default_index(-1),
"patience": Integer(10),
"delta": Float(0.001)
},
"random_seed": Optional(Integer(50))
}
}
}
| 3,630 | 32.934579 | 89 | py |
XFL | XFL-master/python/algorithm/config_descriptor/vertical_poisson_regression/assist_trainer.py | from common.checker.x_types import String, Bool, Integer, Float, Any
from common.checker.qualifiers import OneOf, SomeOf, RepeatableSomeOf, Required, Optional
vertical_poisson_regression_assist_trainer_rule = {
"identity": "assist_trainer",
"model_info": {
"name": "vertical_poisson_regression"
}
}
| 321 | 28.272727 | 89 | py |
XFL | XFL-master/python/algorithm/config_descriptor/vertical_poisson_regression/trainer.py | from common.checker.x_types import String, Bool, Integer, Float, Any
from common.checker.qualifiers import OneOf, SomeOf, RepeatableSomeOf, Required, Optional
vertical_poisson_regression_trainer_rule = {
"identity": "trainer",
"model_info": {
"name": "vertical_poisson_regression"
},
"input": {
"__rule__": [Optional("pretrained_model"), Required("trainset", "valset")],
"trainset": [
OneOf(
{
"type": "csv",
"path": String(""),
"name": String(""),
"has_id": Bool(True),
"has_label": Bool(True)
}
).set_default_index(0)
],
"valset": [
RepeatableSomeOf(
{
"type": "csv",
"path": String(""),
"name": String(""),
"has_id": Bool(True),
"has_label": Bool(True)
}
).set_default_indices(0)
],
"pretrained_model": {
"path": String(""),
"name": String("")
}
},
"output": {
"path": String("/opt/checkpoints/[JOB_ID]/[NODE_ID]"),
"model": {
"name": String("vertical_poisson_regression_[STAGE_ID].model")
}
},
"train_info": {
}
}
| 1,392 | 28.020833 | 89 | py |
XFL | XFL-master/python/algorithm/config_descriptor/local_data_statistic/label_trainer.py | from common.checker.x_types import String, Bool, Integer, Float, Any
from common.checker.qualifiers import OneOf, SomeOf, RepeatableSomeOf, Required, Optional
local_data_statistic_rule = {
"identity": "label_trainer",
"model_info": {
"name": "local_data_statistic"
},
"input": {
"dataset": [
OneOf(
{
"type": "csv",
"path": String(""),
"name": String(""),
"has_id": Bool(True),
"has_label": Bool(True)
}
).set_default_index(0)
]
},
"output": {
"path": String("/opt/checkpoints/[JOB_ID]/[NODE_ID]"),
"summary": {
"name": String("data_summary_[STAGE_ID].json")
}
},
"train_info": {
"train_params": {
"quantile": [RepeatableSomeOf(Float(0.25))]
}
}
} | 929 | 26.352941 | 89 | py |
XFL | XFL-master/python/algorithm/config_descriptor/vertical_kmeans/label_trainer.py | from common.checker.x_types import String, Bool, Integer, Float, Any
from common.checker.qualifiers import OneOf, SomeOf, RepeatableSomeOf, Required, Optional
vertical_kmeans_label_trainer_rule = {
"identity": "label_trainer",
"model_info": {
"name": "vertical_kmeans"
},
"input": {
"trainset": [
OneOf(
{
"type": "csv",
"path": String(""),
"name": String(""),
"has_id": Bool(True),
"has_label": Bool(True)
}
).set_default_index(0)
]
},
"output": {
"path": String("/opt/checkpoints/[JOB_ID]/[NODE_ID]"),
"model": {
"name": String("vertical_kmeans_[STAGE_ID].pkl")
},
"result": {
"name": String("cluster_result_[STAGE_ID].csv")
},
"summary": {
"name": String("cluster_summary_[STAGE_ID].csv")
}
},
"train_info": {
"__rule__": Optional("train_params"),
"train_params": {
"init": OneOf("random", "kmeans++").set_default("random"),
"encryption": {
"__rule__": OneOf("otp", "plain").set_default("otp"),
"otp": {
"key_bitlength": OneOf(64, 128).set_default(64),
"data_type": "torch.Tensor",
"key_exchange": {
"key_bitlength": OneOf(3072, 4096, 6144, 8192),
"optimized": Bool(True)
},
"csprng": {
"name": OneOf("hmac_drbg").set_default("hmac_drbg"),
"method": OneOf("sha1", "sha224", "sha256", "sha384", "sha512").set_default("sha256")
}
},
"plain": {}
},
"k": Integer(5),
"max_iter": Integer(50),
"tol": Float(1e-6),
"random_seed": Float(50)
}
}
}
| 2,041 | 31.935484 | 109 | py |
XFL | XFL-master/python/algorithm/config_descriptor/vertical_kmeans/assist_trainer.py | from common.checker.x_types import String, Bool, Integer, Float, Any
from common.checker.qualifiers import OneOf, SomeOf, RepeatableSomeOf, Required, Optional
vertical_kmeans_assist_trainer_rule = {
"identity": "assist_trainer",
"model_info": {
"name": "vertical_kmeans"
},
"input": {
},
"output": {
},
"train_info": {
}
}
| 371 | 19.666667 | 89 | py |
XFL | XFL-master/python/algorithm/config_descriptor/vertical_kmeans/sync.py | from common.checker.x_types import String, Bool, Integer, Float, Any, All
from common.checker.qualifiers import OneOf, SomeOf, RepeatableSomeOf, Required, Optional
vertical_kmeans_rule = {
"train_info": All()
}
| 218 | 23.333333 | 89 | py |
XFL | XFL-master/python/algorithm/config_descriptor/vertical_kmeans/trainer.py | from common.checker.x_types import String, Bool, Integer, Float, Any
from common.checker.qualifiers import OneOf, SomeOf, RepeatableSomeOf, Required, Optional
vertical_kmeans_trainer_rule = {
"identity": "trainer",
"model_info": {
"name": "vertical_kmeans"
},
"input": {
"trainset": [
OneOf(
{
"type": "csv",
"path": String(""),
"name": String(""),
"has_id": Bool(True),
"has_label": Bool(False)
}
).set_default_index(0)
]
},
"output": {
"path": String("/opt/checkpoints/[JOB_ID]/[NODE_ID]"),
"model": {
"name": String("vertical_kmeans_[STAGE_ID].pkl")
},
"result": {
"name": String("cluster_result_[STAGE_ID].csv")
},
"summary": {
"name": String("cluster_summary_[STAGE_ID].csv")
}
},
"train_info": {
}
}
| 1,025 | 25.307692 | 89 | py |
XFL | XFL-master/python/algorithm/config_descriptor/horizontal_kmeans/label_trainer.py | from common.checker.x_types import String, Bool, Integer, Float, Any
from common.checker.qualifiers import OneOf, SomeOf, RepeatableSomeOf, Required, Optional
horizontal_kmeans_label_trainer_rule = {
"identity": "label_trainer",
"model_info": {
"name": "horizontal_kmeans"
},
"input": {
"trainset": [
OneOf(
{
"type": "csv",
"path": String(),
"name": String(),
"has_label": Bool(True),
"has_id": Bool(False)
}
).set_default_index(0)
]
},
"output": {
"path": String("/opt/checkpoints/[JOB_ID]/[NODE_ID]"),
"metric_train": {
"name": String("kmeans_metric_train_[STAGE_ID].csv")
}
},
"train_info": {
"train_params": {
"local_epoch": Integer(1)
}
}
}
| 929 | 25.571429 | 89 | py |
XFL | XFL-master/python/algorithm/config_descriptor/horizontal_kmeans/assist_trainer.py | from common.checker.x_types import String, Bool, Integer, Float, Any
from common.checker.qualifiers import OneOf, SomeOf, RepeatableSomeOf, Required, Optional
from common.utils.auto_descriptor.torch.optimizer import optimizer
from common.utils.auto_descriptor.torch.lr_scheduler import lr_scheduler
from common.utils.auto_descriptor.torch.lossfunc import lossfunc
from common.utils.auto_descriptor.torch.metrics import metrics
from common.utils.utils import update_dict
from algorithm.core.metrics import metric_dict
horizontal_kmeans_assist_trainer_rule = {
"identity": "assist_trainer",
"model_info": {
"name": "horizontal_kmeans",
"config": {
"input_dim": Integer(),
"num_clusters": Integer(3)
}
},
"input": {
"__rule__": [Optional("pretrain_model"), Required("valset")],
"valset": [
OneOf(
{
"type": "csv",
"path": String(),
"name": String(),
"has_label": Bool(True),
"has_id": Bool(False)
}
).set_default_index(0)
],
"pretrain_model": {}
},
"output": {
"path": String("/opt/checkpoints/[JOB_ID]/[NODE_ID]"),
"model": {
"name": String("horizontal_kmeans_[STAGE_ID].model")
},
"metric_val": {
"name": String("kmeans_metric_val_[STAGE_ID].csv")
}
},
"train_info": {
"train_params": {
"global_epoch": Integer(20),
"aggregation": {
"method": {
"__rule__": OneOf("fedavg", "fedprox", "scaffold").set_default_index(0),
"fedavg": {},
"fedprox": {
"mu": Float(0.1)
},
"scaffold": {}
}
},
"encryption": {
"__rule__": OneOf("otp", "plain").set_default("otp"),
"otp": {
"key_bitlength": OneOf(64, 128).set_default(64),
"data_type": "torch.Tensor",
"key_exchange": {
"key_bitlength": OneOf(3072, 4096, 6144, 8192),
"optimized": Bool(True)
},
"csprng": {
"name": OneOf("hmac_drbg").set_default("hmac_drbg"),
"method": OneOf("sha1", "sha224", "sha256", "sha384", "sha512").set_default("sha256")
}
},
"plain": {}
}
}
}
}
| 2,660 | 34.013158 | 109 | py |
XFL | XFL-master/python/algorithm/config_descriptor/horizontal_linear_regression/label_trainer.py | from common.checker.x_types import String, Bool, Integer, Float, Any
from common.checker.qualifiers import OneOf, SomeOf, RepeatableSomeOf, Required, Optional
horizontal_linear_regression_label_trainer_rule = {
"identity": "label_trainer",
"model_info": {
"name": "horizontal_linear_regression"
},
"input": {
"trainset": [
OneOf(
{
"type": "csv",
"path": String(),
"name": String(),
"has_label": Bool(True),
"has_id": Bool(True)
}
).set_default_index(0)
]
},
"output": {
"path": String("/opt/checkpoints/[JOB_ID]/[NODE_ID]"),
"model": {
"name": String("horizontal_linear_regression_[STAGE_ID].model")
},
"metric_train": {
"name": String("lr_metric_train_[STAGE_ID].csv")
}
},
"train_info": {
"device": OneOf("cpu", "cuda:0"),
"train_params": {
"local_epoch": Integer(1),
"train_batch_size": Integer(64),
}
}
}
| 1,140 | 27.525 | 89 | py |
XFL | XFL-master/python/algorithm/config_descriptor/horizontal_linear_regression/assist_trainer.py | from common.checker.x_types import String, Bool, Integer, Float, Any
from common.checker.qualifiers import OneOf, SomeOf, RepeatableSomeOf, Required, Optional
from common.utils.auto_descriptor.torch.optimizer import optimizer
from common.utils.auto_descriptor.torch.lr_scheduler import lr_scheduler
from common.utils.auto_descriptor.torch.lossfunc import lossfunc
from common.utils.auto_descriptor.torch.metrics import metrics
from common.utils.utils import update_dict
from algorithm.core.metrics import metric_dict
horizontal_linear_regression_assist_trainer_rule = {
"identity": "assist_trainer",
"model_info": {
"name": "horizontal_linear_regression",
"config": {
"input_dim": Integer(),
"bias": Bool(True)
}
},
"input": {
"__rule__": [Optional("pretrain_model"), Required("valset")],
"valset": [
OneOf(
{
"type": "csv",
"path": String(),
"name": String(),
"has_label": Bool(True),
"has_id": Bool(True)
}
).set_default_index(0)
],
"pretrain_model": {}
},
"output": {
"path": String("/opt/checkpoints/[JOB_ID]/[NODE_ID]"),
"model": {
"name": String("horizontal_linear_regression_[STAGE_ID].model")
},
"metric_val": {
"name": String("lr_metric_val_[STAGE_ID].csv")
}
},
"train_info": {
"device": OneOf("cpu", "cuda:0"),
"interaction_params": {
"__rule__": [Optional("save_frequency")],
"save_frequency": Integer(1),
},
"train_params": {
"global_epoch": Integer(20),
"val_batch_size": Integer(128),
"aggregation": {
"method": {
"__rule__": OneOf("fedavg", "fedprox", "scaffold").set_default_index(0),
"fedavg": {},
"fedprox": {
"mu": Float(0.1)
},
"scaffold": {}
}
},
"encryption": {
"__rule__": OneOf("otp", "plain").set_default("otp"),
"otp": {
"key_bitlength": OneOf(64, 128).set_default(64),
"data_type": "torch.Tensor",
"key_exchange": {
"key_bitlength": OneOf(3072, 4096, 6144, 8192),
"optimized": Bool(True)
},
"csprng": {
"name": OneOf("hmac_drbg").set_default("hmac_drbg"),
"method": OneOf("sha1", "sha224", "sha256", "sha384", "sha512").set_default("sha256")
}
},
"plain": {}
},
"optimizer": {
"__rule__": OneOf(*list(optimizer.keys())).set_default("Adam"),
},
"lr_scheduler": {
"__rule__": OneOf(*list(lr_scheduler.keys())).set_default("StepLR")
},
"lossfunc": {
"L1Loss": lossfunc["L1Loss"]
},
"metric": {
"mae": metrics["mae"],
"mse": metrics["mse"],
"mape": metrics["mape"]
},
"early_stopping": {
},
}
}
}
update_dict(horizontal_linear_regression_assist_trainer_rule["train_info"]["train_params"]["optimizer"], optimizer)
update_dict(horizontal_linear_regression_assist_trainer_rule["train_info"]["train_params"]["lr_scheduler"], lr_scheduler)
| 3,686 | 35.50495 | 121 | py |
XFL | XFL-master/python/algorithm/config_descriptor/horizontal_logistic_regression/label_trainer.py | from common.checker.x_types import String, Bool, Integer, Float, Any
from common.checker.qualifiers import OneOf, SomeOf, RepeatableSomeOf, Required, Optional
# Config-descriptor rule for the label-trainer side of horizontal logistic
# regression. Leaf checker objects (String/Bool/Integer/OneOf/...) appear to
# describe the expected type and default of each user-config field.
horizontal_logistic_regression_label_trainer_rule = {
    "identity": "label_trainer",
    "model_info": {
        "name": "horizontal_logistic_regression"
    },
    "input": {
        "trainset": [
            OneOf(
                {
                    "type": "csv",
                    "path": String(),
                    "name": String(),
                    "has_label": Bool(True),
                    "has_id": Bool(True)
                }
            ).set_default_index(0)
        ]
    },
    "output": {
        # "model" and "onnx_model" output entries are optional in user configs.
        "__rule__": [Optional("model"), Optional("onnx_model")],
        "path": String("/opt/checkpoints/[JOB_ID]/[NODE_ID]"),
        "model": {
            # NOTE(review): "logitstic" looks like a typo for "logistic" in the
            # default file names; kept as-is because downstream consumers may
            # rely on the exact default name — verify before renaming.
            "name": String("horizontal_logitstic_regression_[STAGE_ID].model")
        },
        "onnx_model": {
            "name": String("horizontal_logitstic_regression_[STAGE_ID].onnx")
        },
        "metric_train": {
            "name": String("lr_metric_train_[STAGE_ID].csv")
        }
    },
    "train_info": {
        "device": OneOf("cpu", "cuda:0"),
        "train_params": {
            "local_epoch": Integer(1),
            "train_batch_size": Integer(64),
        }
    }
} | 1,324 | 29.813953 | 89 | py |
XFL | XFL-master/python/algorithm/config_descriptor/horizontal_logistic_regression/assist_trainer.py | from common.checker.x_types import String, Bool, Integer, Float, Any
from common.checker.qualifiers import OneOf, SomeOf, RepeatableSomeOf, Required, Optional
from common.utils.auto_descriptor.torch.optimizer import optimizer
from common.utils.auto_descriptor.torch.lr_scheduler import lr_scheduler
from common.utils.auto_descriptor.torch.lossfunc import lossfunc
from common.utils.auto_descriptor.torch.metrics import metrics
from common.utils.utils import update_dict
from algorithm.core.metrics import metric_dict
horizontal_logistic_regression_assist_trainer_rule = {
"identity": "assist_trainer",
"model_info": {
"name": "horizontal_logistic_regression",
"config": {
"input_dim": Integer(),
"bias": Bool(True)
}
},
"input": {
"__rule__": [Optional("pretrain_model"), Required("valset")],
"valset": [
OneOf(
{
"type": "csv",
"path": String(),
"name": String(),
"has_label": Bool(True),
"has_id": Bool(True)
}
).set_default_index(0)
],
"pretrain_model": {}
},
"output": {
"__rule__": [Optional("model"), Optional("onnx_model")],
"path": String("/opt/checkpoints/[JOB_ID]/[NODE_ID]"),
"model": {
"name": String("horizontal_logitstic_regression_[STAGE_ID].model")
},
"onnx_model": {
"name": String("horizontal_logitstic_regression_[STAGE_ID].onnx")
},
"metric_val": {
"name": String("lr_metric_val_[STAGE_ID].csv")
}
},
"train_info": {
"device": OneOf("cpu", "cuda:0"),
"interaction_params": {
"__rule__": [Optional("save_frequency")],
"save_frequency": Integer(1),
},
"train_params": {
"global_epoch": Integer(20),
"val_batch_size": Integer(128),
"aggregation": {
"method": {
"__rule__": OneOf("fedavg", "fedprox", "scaffold").set_default_index(0),
"fedavg": {},
"fedprox": {
"mu": Float(0.1)
},
"scaffold": {}
}
},
"encryption": {
"__rule__": OneOf("otp", "plain").set_default("otp"),
"otp": {
"key_bitlength": OneOf(64, 128).set_default(64),
"data_type": "torch.Tensor",
"key_exchange": {
"key_bitlength": OneOf(3072, 4096, 6144, 8192),
"optimized": Bool(True)
},
"csprng": {
"name": OneOf("hmac_drbg").set_default("hmac_drbg"),
"method": OneOf("sha1", "sha224", "sha256", "sha384", "sha512").set_default("sha256")
}
},
"plain": {}
},
"optimizer": {
"__rule__": OneOf(*list(optimizer.keys())).set_default("Adam"),
},
"lr_scheduler": {
"__rule__": OneOf(*list(lr_scheduler.keys())).set_default("StepLR")
},
"lossfunc": {
"BCELoss": lossfunc["BCELoss"]
},
"metric": {
"acc": metrics["acc"],
"precision": metrics["precision"],
"recall": metrics["recall"],
"f1_score": metrics["f1_score"],
"auc": metrics["auc"],
"ks": metrics["ks"]
},
"early_stopping": {
"key": OneOf("acc", "precision", "recall", "f1_score", "auc", "ks").set_default_index(-1).add_rule(lambda x, y: x in y["train_info"]["train_params"]["metric"].keys(), "should in metric"),
"patience": Integer(10).ge(-1),
"delta": Float(0.001).gt(0)
},
}
}
}
# Splice the auto-generated optimizer / lr_scheduler descriptors into the rule
# skeleton above (update_dict comes from common.utils.utils; presumably a
# nested in-place dict merge — confirm against its definition).
update_dict(horizontal_logistic_regression_assist_trainer_rule["train_info"]["train_params"]["optimizer"], optimizer)
update_dict(horizontal_logistic_regression_assist_trainer_rule["train_info"]["train_params"]["lr_scheduler"], lr_scheduler)
| 4,321 | 36.912281 | 203 | py |
XFL | XFL-master/python/algorithm/config_descriptor/local_standard_scaler/label_trainer.py | from common.checker.x_types import String, Bool, Integer, Float, Any
from common.checker.qualifiers import OneOf, SomeOf, RepeatableSomeOf, Required, Optional
# Config-descriptor rule for the local standard-scaler operator
# (runs on the label_trainer only — no trainer/assist counterpart here).
local_standard_scaler_rule = {
    "identity": "label_trainer",
    "model_info": {
        "name": "local_standard_scaler"
    },
    "input": {
        "trainset": [
            OneOf(
                {
                    "type": "csv",
                    "path": String(""),
                    "name": String(""),
                    "has_id": Bool(True),
                    "has_label": Bool(True)
                }
            ).set_default_index(0)
        ],
        # Validation set is optional but defaults to present
        # (set_default_not_none).
        "valset": [
            Optional(OneOf(
                {
                    "type": "csv",
                    "path": String(""),
                    "name": String(""),
                    "has_id": Bool(True),
                    "has_label": Bool(True)
                }
            ).set_default_index(0)
        ).set_default_not_none()
        ]
    },
    "output": {
        # At least one of "model"/"proto_model" must be configured.
        "__rule__": [SomeOf("model", "proto_model")],
        "path": String("/opt/checkpoints/[JOB_ID]/[NODE_ID]"),
        "model": {
            "name": String("local_standard_scaler_[STAGE_ID].model")
        },
        "proto_model": {
            "name": String("local_standard_scaler_[STAGE_ID].pmodel")
        },
        "trainset": {
            "name": String("standardized_train_[STAGE_ID].csv")
        },
        "valset": {
            "name": String("standardized_val_[STAGE_ID].csv")
        }
    },
    "train_info": {
        "train_params": {
            "with_mean": Bool(True),
            "with_std": Bool(True),
            # Per-feature overrides; the String() key presumably stands for an
            # arbitrary feature/column name — confirm against the checker.
            "feature_standard": {
                "__rule__": Optional(String()),
                String(): {
                    "with_mean": Bool(False),
                    "with_std": Bool(False)
                }
            }
        }
    }
} | 1,939 | 29.3125 | 89 | py |
XFL | XFL-master/python/algorithm/config_descriptor/vertical_xgboost_infer/label_trainer.py | from common.checker.x_types import String, Bool, Integer, Float, Any
from common.checker.qualifiers import OneOf, SomeOf, RepeatableSomeOf, Required, Optional
# Config-descriptor rule for the label-trainer side of vertical XGBoost batch
# inference ("inference": True marks this as an inference-mode rule).
vertical_xgboost_infer_label_trainer_rule = {
    "identity": "label_trainer",
    "model_info": {
        "name": "vertical_xgboost"
    },
    "inference": True,
    "input": {
        "testset": [
            Optional(
                OneOf(
                    {
                        "type": "csv",
                        "path": String(""),
                        "name": String(""),
                        "has_id": Bool(True),
                        "has_label": Bool(True)
                    }
                ).set_default_index(0)
            )
        ],
        # Model produced by a previous vertical_xgboost training stage.
        "pretrained_model": {
            "path": String(""),
            "name": String("")
        }
    },
    "output": {
        "__rule__": [Optional("path"), Optional("testset")],
        "path": String("/opt/checkpoints/[JOB_ID]/[NODE_ID]"),
        "testset": {
            "name": String("xgb_prediction_test_[STAGE_ID].csv")
        }
    },
    "train_info": {
        "train_params": {
            "batch_size_val": Integer(40960)
        }
    }
}
| 1,197 | 26.860465 | 89 | py |
XFL | XFL-master/python/algorithm/config_descriptor/vertical_xgboost_infer/sync.py | from common.checker.x_types import String, Bool, Integer, Float, Any, All
from common.checker.qualifiers import OneOf, SomeOf, RepeatableSomeOf, Required, Optional
# Sync rule for vertical XGBoost inference. All() is the wildcard type from
# common.checker.x_types; this rule presumably marks which train_info fields
# are synchronized across parties — confirm against the checker framework.
vertical_xgboost_infer_sync_rule = {
    "train_info": {
        "train_params": {
            "batch_size_val": All()
        }
    }
}
| 303 | 24.333333 | 89 | py |
XFL | XFL-master/python/algorithm/config_descriptor/vertical_xgboost_infer/trainer.py | from common.checker.x_types import String, Bool, Integer, Float, Any
from common.checker.qualifiers import OneOf, SomeOf, RepeatableSomeOf, Required, Optional
vertical_xgboost_infer_trainer_rule = {
"identity": "trainer",
"model_info": {
"name": "vertical_xgboost"
},
"inference": True,
"input": {
"testset": [
OneOf(
{
"type": "csv",
"path": String(""),
"name": String(""),
"has_id": Bool(True),
"has_label": Bool(False)
}
).set_default_index(0)
],
"pretrained_model": {
"path": String(""),
"name": String("")
}
},
"output": {
"__rule__": [Optional("path"), Optional("testset")],
"path": String("/opt/checkpoints/[JOB_ID]/[NODE_ID]"),
"testset": {
"name": String("xgb_prediction_test_[STAGE_ID].csv")
}
},
"train_info": {
}
}
| 1,034 | 25.538462 | 89 | py |
XFL | XFL-master/python/algorithm/config_descriptor/vertical_pearson/label_trainer.py | from common.checker.x_types import String, Bool, Integer, Float, Any
from common.checker.qualifiers import OneOf, SomeOf, RepeatableSomeOf, Required, Optional
vertical_pearson_label_trainer_rule = {
"identity": "label_trainer",
"model_info": {
"name": "vertical_pearson"
},
"input": {
"trainset": [
OneOf(
{
"type": "csv",
"path": String(""),
"name": String(""),
"has_id": Bool(True),
"has_label": Bool(True)
}
).set_default_index(0)
]
},
"output": {
"path": String("/opt/checkpoints/[JOB_ID]/[NODE_ID]"),
"corr": {
"name": String("vertical_pearson_[STAGE_ID].pkl")
}
},
"train_info": {
"train_params": {
"col_index": OneOf(-1, [RepeatableSomeOf(Integer())]),
"col_names": String(""),
"encryption": {
"paillier": {
"key_bit_size": OneOf(2048, 4096, 8192).set_default_index(0),
"precision": Optional(Integer(7)).set_default_not_none(),
"djn_on": Bool(True),
"parallelize_on": Bool(True)
},
},
"max_num_cores": Integer(999),
"sample_size": Integer(9999)
}
}
}
| 1,412 | 29.717391 | 89 | py |
XFL | XFL-master/python/algorithm/config_descriptor/vertical_pearson/sync.py | from common.checker.x_types import String, Bool, Integer, Float, Any, All
from common.checker.qualifiers import OneOf, SomeOf, RepeatableSomeOf, Required, Optional
vertical_pearson_sync_rule = {
"train_info": All()
}
| 222 | 30.857143 | 89 | py |
XFL | XFL-master/python/algorithm/config_descriptor/vertical_pearson/trainer.py | from common.checker.x_types import String, Bool, Integer, Float, Any
from common.checker.qualifiers import OneOf, SomeOf, RepeatableSomeOf, Required, Optional
vertical_pearson_trainer_rule = {
"identity": "trainer",
"model_info": {
"name": "vertical_pearson"
},
"input": {
"trainset": [
OneOf(
{
"type": "csv",
"path": String(""),
"name": String(""),
"has_id": Bool(True),
"has_label": Bool(False)
}
).set_default_index(0)
]
},
"output": {
"path": String("/opt/checkpoints/[JOB_ID]/[NODE_ID]"),
"corr": {
"name": String("vertical_pearson_[STAGE_ID].pkl")
}
},
"train_info": {
}
}
| 833 | 25.903226 | 89 | py |
XFL | XFL-master/python/algorithm/config_descriptor/horizontal_poisson_regression/label_trainer.py | from common.checker.x_types import String, Bool, Integer, Float, Any
from common.checker.qualifiers import OneOf, SomeOf, RepeatableSomeOf, Required, Optional
horizontal_poisson_regression_label_trainer_rule = {
"identity": "label_trainer",
"model_info": {
"name": "horizontal_poisson_regression"
},
"input": {
"trainset": [
OneOf(
{
"type": "csv",
"path": String(),
"name": String(),
"has_label": Bool(True),
"has_id": Bool(True)
}
).set_default_index(0)
]
},
"output": {
"path": String("/opt/checkpoints/[JOB_ID]/[NODE_ID]"),
"model": {
"name": String("horizontal_poisson_regression_[STAGE_ID].model")
},
"metric_train": {
"name": String("pr_metric_train_[STAGE_ID].csv")
}
},
"train_info": {
"device": OneOf("cpu", "cuda:0"),
"train_params": {
"local_epoch": Integer(1),
"train_batch_size": Integer(64),
}
}
}
| 1,143 | 27.6 | 89 | py |
XFL | XFL-master/python/algorithm/config_descriptor/horizontal_poisson_regression/assist_trainer.py | from common.checker.x_types import String, Bool, Integer, Float, Any
from common.checker.qualifiers import OneOf, SomeOf, RepeatableSomeOf, Required, Optional
from common.utils.auto_descriptor.torch.optimizer import optimizer
from common.utils.auto_descriptor.torch.lr_scheduler import lr_scheduler
from common.utils.auto_descriptor.torch.lossfunc import lossfunc
from common.utils.auto_descriptor.torch.metrics import metrics
from common.utils.utils import update_dict
from algorithm.core.metrics import metric_dict
horizontal_poisson_regression_assist_trainer_rule = {
"identity": "assist_trainer",
"model_info": {
"name": "horizontal_poisson_regression",
"config": {
"input_dim": Integer(),
"bias": Bool(True)
}
},
"input": {
"__rule__": [Optional("pretrain_model"), Required("valset")],
"valset": [
OneOf(
{
"type": "csv",
"path": String(),
"name": String(),
"has_label": Bool(True),
"has_id": Bool(True)
}
).set_default_index(0)
],
"pretrain_model": {}
},
"output": {
"path": String("/opt/checkpoints/[JOB_ID]/[NODE_ID]"),
"model": {
"name": String("horizontal_poisson_regression_[STAGE_ID].model")
},
"metric_val": {
"name": String("pr_metric_val_[STAGE_ID].csv")
}
},
"train_info": {
"device": OneOf("cpu", "cuda:0"),
"interaction_params": {},
"train_params": {
"global_epoch": Integer(20),
"val_batch_size": Integer(128),
"aggregation": {
"method": {
"__rule__": OneOf("fedavg", "fedprox", "scaffold").set_default_index(0),
"fedavg": {},
"fedprox": {
"mu": Float(0.1)
},
"scaffold": {}
}
},
"encryption": {
"__rule__": OneOf("otp", "plain").set_default("otp"),
"otp": {
"key_bitlength": OneOf(64, 128).set_default(64),
"data_type": "torch.Tensor",
"key_exchange": {
"key_bitlength": OneOf(3072, 4096, 6144, 8192),
"optimized": Bool(True)
},
"csprng": {
"name": OneOf("hmac_drbg").set_default("hmac_drbg"),
"method": OneOf("sha1", "sha224", "sha256", "sha384", "sha512").set_default("sha256")
}
},
"plain": {}
},
"optimizer": {
"__rule__": OneOf(*list(optimizer.keys())).set_default("Adam"),
},
"lr_scheduler": {
"__rule__": OneOf(*list(lr_scheduler.keys())).set_default("StepLR")
},
"lossfunc": {
"PoissonNLLLoss": lossfunc["PoissonNLLLoss"]
},
"metric": {
"mean_poisson_deviance": metrics["mean_poisson_deviance"]
},
"early_stopping": {}
}
}
}
update_dict(horizontal_poisson_regression_assist_trainer_rule["train_info"]["train_params"]["optimizer"], optimizer)
update_dict(horizontal_poisson_regression_assist_trainer_rule["train_info"]["train_params"]["lr_scheduler"], lr_scheduler)
| 3,544 | 36.315789 | 122 | py |
XFL | XFL-master/python/algorithm/config_descriptor/vertical_linear_regression/label_trainer.py | from common.checker.x_types import String, Bool, Integer, Float, Any
from common.checker.qualifiers import OneOf, SomeOf, RepeatableSomeOf, Required, Optional
# TODO: not ready
vertical_linear_regression_label_trainer_rule = {
"identity": "label_trainer",
"model_info": {
"name": "vertical_linear_regression"
},
"input": {
"__rule__": [Optional("pretrained_model"), Required("trainset", "valset")],
"trainset": [
OneOf(
{
"type": "csv",
"path": String(""),
"name": String(""),
"has_id": Bool(True),
"has_label": Bool(True)
}
).set_default_index(0)
],
"valset": [
RepeatableSomeOf(
{
"type": "csv",
"path": String(""),
"name": String(""),
"has_id": Bool(True),
"has_label": Bool(True)
}
).set_default_indices(0)
],
"pretrained_model": {
"path": String(""),
"name": String("")
}
},
"output": {
"path": String("/opt/checkpoints/[JOB_ID]/[NODE_ID]"),
"model": {
"name": String("vertical_linear_regression_[STAGE_ID].model")
},
"onnx_model": {
"name": String("vertical_linear_regression_[STAGE_ID].onnx")
},
"metric_train": {
"name": String("linear_reg_metric_train_[STAGE_ID].csv")
},
"metric_val": {
"name": String("linear_reg_metric_val_[STAGE_ID].csv")
},
"prediction_train": {
"name": String("linear_reg_prediction_train_[STAGE_ID].csv")
},
"prediction_val": {
"name": String("linear_reg_prediction_val_[STAGE_ID].csv")
},
"feature_importance": {
"name": String("linear_reg_feature_importance_[STAGE_ID].csv")
}
},
"train_info": {
"interaction_params": {
"save_frequency": Integer(-1),
"echo_training_metrics": Bool(True),
"write_training_prediction": Bool(True),
"write_validation_prediction": Bool(True)
},
"train_params": {
"global_epoch": Integer(10),
"batch_size": Integer(2048),
"encryption": {
"__rule__": OneOf("ckks", "paillier", "plain").set_default("ckks"),
"ckks": {
"poly_modulus_degree": Integer(8192),
"coeff_mod_bit_sizes": [
RepeatableSomeOf(Integer()).set_default([60, 40, 40, 60])
],
"global_scale_bit_size": Integer(40)
},
"paillier": {
"key_bit_size": OneOf(2048, 4096, 8192).set_default_index(0),
"precision": Optional(Integer(7).ge(1)).set_default_not_none(),
"djn_on": Bool(True),
"parallelize_on": Bool(True)
},
"plain": {}
},
"metric": {
"mse": {},
"mape": {},
"mae": {},
"rmse": {}
},
"optimizer": {
"lr": Float(0.01),
"p": OneOf(0, 1, 2).set_default(2),
"alpha": Float(1e-4)
},
"early_stopping": {
"key": "loss",
"patience": Integer(-1),
"delta": Float(0)
},
"random_seed": Optional(Integer(50))
}
}
}
| 3,724 | 32.558559 | 89 | py |
XFL | XFL-master/python/algorithm/config_descriptor/vertical_linear_regression/assist_trainer.py | from common.checker.x_types import String, Bool, Integer, Float, Any
from common.checker.qualifiers import OneOf, SomeOf, RepeatableSomeOf, Required, Optional
vertical_linear_regression_assist_trainer_rule = {
"identity": "assist_trainer",
"model_info": {
"name": "vertical_linear_regression"
}
}
| 319 | 28.090909 | 89 | py |
XFL | XFL-master/python/algorithm/config_descriptor/vertical_linear_regression/sync.py | from x_types import String, Bool, Integer, Float, All
from common.checker.qualifiers import OneOf, SomeOf, RepeatableSomeOf, Required, Optional
# Sync rule: All() (wildcard from x_types) marks the whole train_info section
# as synchronized across parties — confirm against the checker framework.
# NOTE(review): this module imports "from x_types import ..." while every
# sibling module uses "from common.checker.x_types import ..."; the short
# import path looks broken — verify it resolves at runtime.
vertical_linear_regression_sync_rule = {
    "train_info": All()
}
| 213 | 25.75 | 89 | py |
XFL | XFL-master/python/algorithm/config_descriptor/vertical_linear_regression/trainer.py | from common.checker.x_types import String, Bool, Integer, Float, Any
from common.checker.qualifiers import OneOf, SomeOf, RepeatableSomeOf, Required, Optional
vertical_linear_regression_trainer_rule = {
"identity": "trainer",
"model_info": {
"name": "vertical_linear_regression"
},
"input": {
"__rule__": [Optional("pretrained_model"), Required("trainset", "valset")],
"trainset": [
OneOf(
{
"type": "csv",
"path": String(""),
"name": String(""),
"has_id": Bool(True),
"has_label": Bool(False)
}
).set_default_index(0)
],
"valset": [
RepeatableSomeOf(
{
"type": "csv",
"path": String(""),
"name": String(""),
"has_id": Bool(True),
"has_label": Bool(False)
}
).set_default_indices(0)
],
"pretrained_model": {
"path": String(""),
"name": String("")
}
},
"output": {
"path": String("/opt/checkpoints/[JOB_ID]/[NODE_ID]"),
"model": {
"name": String("vertical_linear_regression_[STAGE_ID].model")
},
"onnx_model": {
"name": String("vertical_linear_regression_[STAGE_ID].onnx")
}
},
"train_info": {
}
}
| 1,499 | 28.411765 | 89 | py |
XFL | XFL-master/python/algorithm/config_descriptor/local_data_split/label_trainer.py | from common.checker.x_types import String, Bool, Integer, Float, Any
from common.checker.qualifiers import OneOf, SomeOf, RepeatableSomeOf, Required, Optional
local_data_split_rule = {
"identity": "label_trainer",
"model_info": {
"name": "local_data_split"
},
"input": {
"dataset":
[
OneOf(
{
"type": "csv",
"path": String(""),
"name": String(""),
"has_header": Bool(True),
"has_label": Bool(True)
}
).set_default_index(0)
]
},
"output": {
"path": String("/opt/checkpoints/[JOB_ID]/[NODE_ID]"),
"trainset":
{
"name": String("splitted_train_[STAGE_ID].csv")
},
"valset":
{
"name": String("splitted_val_[STAGE_ID].csv")
}
},
"train_info": {
"train_params":
{
"shuffle": Bool(True),
"max_num_cores": Integer(999),
"batch_size": Integer(100000),
"train_weight": Integer(8),
"val_weight": Integer(2)
}
}
}
| 1,191 | 24.913043 | 89 | py |
XFL | XFL-master/python/algorithm/config_descriptor/horizontal_chatglm/label_trainer.py | from common.checker.x_types import String, Bool, Integer, Float, Any
from common.checker.qualifiers import OneOf, SomeOf, RepeatableSomeOf, Required, Optional
# Config-descriptor rule for the label-trainer side of horizontal ChatGLM
# fine-tuning. Leaf checker objects (String/Integer/OneOf/...) describe the
# expected type and default of each user-config field.
horizontal_chatglm_label_trainer_rule = {
    "identity": "label_trainer",
    "model_info": {
        "name": "horizontal_chatglm"
    },
    "input": {
        # Fixed: the optional-key list must reference keys that actually exist
        # below — "adater_model" / "pretrain_model" were typos for
        # "adapter_model" / "pretrained_model", so the qualifiers could never
        # match their target entries.
        "__rule__": [Optional("trainset"), Optional("adapter_model"), Optional("pretrained_model")],
        "trainset": [
            {
                "type": "QA",
                "path": String()
            }
        ],
        "pretrained_model": {
            "path": String()
        },
        "adapter_model": {
            "path": String()
        }
    },
    "output": {
        "path": String("/opt/checkpoints/[JOB_ID]/[NODE_ID]")
    },
    "train_info": {
        "train_params": {
            "trainer": {
                # Field names match transformers.TrainingArguments.
                "per_device_train_batch_size": Integer(1),
                "gradient_accumulation_steps": Integer(4),
                "save_strategy": OneOf("steps", "no"),
                "torch_compile": Bool(False),
                "no_cuda": Bool(False)
            }
        }
    }
}
| 1,127 | 27.2 | 97 | py |
XFL | XFL-master/python/algorithm/config_descriptor/horizontal_chatglm/assist_trainer.py | from common.checker.x_types import String, Bool, Integer, Float, Any
from common.checker.qualifiers import OneOf, SomeOf, RepeatableSomeOf, Required, Optional
from common.utils.auto_descriptor.torch.optimizer import optimizer
from common.utils.auto_descriptor.torch.lr_scheduler import lr_scheduler
from common.utils.auto_descriptor.torch.lossfunc import lossfunc
from common.utils.auto_descriptor.torch.metrics import metrics
from common.utils.utils import update_dict
from algorithm.core.metrics import metric_dict
# Config-descriptor rule for the assist-trainer (aggregator) side of
# horizontal ChatGLM fine-tuning.
horizontal_chatglm_assist_trainer_rule = {
    "identity": "assist_trainer",
    "model_info": {
        "name": "horizontal_chatglm"
    },
    "input": {
        # Fixed: the optional-key list must reference keys that actually exist
        # below — "adater_model" / "pretrain_model" were typos for
        # "adapter_model" / "pretrained_model".
        "__rule__": [Optional("trainset"), Optional("adapter_model"), Optional("pretrained_model")],
        "trainset": [
            {
                "type": "QA",
                "path": String()
            }
        ],
        "pretrained_model": {
            "path": String()
        },
        "adapter_model": {
            "path": String()
        }
    },
    "output": {
        "path": String("/opt/checkpoints/[JOB_ID]/[NODE_ID]")
    },
    "train_info": {
        "train_params": {
            "aggregation": {
                # Fixed: was the builtin float(0.2); the descriptor DSL uses
                # the checker type Float (cf. Float(0.1) etc. below) so the
                # field validates as "any float, default 0.2".
                "agg_steps": Float(0.2)
            },
            "encryption": {
                "__rule__": OneOf("otp", "plain").set_default("otp"),
                "otp": {
                    "key_bitlength": OneOf(64, 128).set_default(64),
                    "data_type": "torch.Tensor",
                    "key_exchange": {
                        "key_bitlength": OneOf(3072, 4096, 6144, 8192),
                        "optimized": Bool(True)
                    },
                    "csprng": {
                        "name": OneOf("hmac_drbg").set_default("hmac_drbg"),
                        "method": OneOf("sha1", "sha224", "sha256", "sha384", "sha512").set_default("sha256")
                    }
                },
                "plain": {}
            },
            "peft": {
                # Fixed: "ADALOARA" was a typo — the branch key defined below
                # is "ADALORA", so that choice could never select its config.
                "__rule__": OneOf("LORA", "PREFIX_TUNING", "ADALORA"),
                "LORA": {
                    "task_type": "CAUSAL_LM",
                    "r": Integer(8),
                    "target_modules": ["query_key_value"],
                    "lora_alpha": Integer(32),
                    "lora_dropout": Float(0.1),
                    "fan_in_fan_out": Bool(False),
                    # Fixed: "loral_only" -> "lora_only", matching the values
                    # accepted by peft.LoraConfig.bias.
                    "bias": OneOf("none", "all", "lora_only"),
                    "modules_to_save": None
                },
                "PREFIX_TUNING": {
                    "task_type": "CAUSAL_LM",
                    "pre_seq_len": Integer(20),
                    "prefix_projection": Bool(False)
                },
                "ADALORA": {
                    "task_type": "CAUSAL_LM",
                    "r": Integer(8),
                    "target_modules": ["query_key_value"],
                    "lora_alpha": Integer(32),
                    "lora_dropout": Float(0.1),
                    "fan_in_fan_out": Bool(False),
                    # Fixed: "loral_only" -> "lora_only" (see LORA above).
                    "bias": OneOf("none", "all", "lora_only"),
                    "modules_to_save": None,
                    "target_r": Integer(8),
                    "init_r": Integer(12),
                    "tinit": Integer(0),
                    "tfinal": Integer(0),
                    "deltaT": Integer(1),
                    "beta1": Float(0.85),
                    "beta2": Float(0.85),
                    "orth_reg_weight": Float(0.5)
                }
            },
            "trainer": {
                # Field names match transformers.TrainingArguments.
                "per_device_train_batch_size": Integer(1),
                "gradient_accumulation_steps": Integer(4),
                "learning_rate": Float(1e-4),
                "weight_decay": Float(0),
                "adam_beta1": Float(0.9),
                "adam_beta2": Float(0.999),
                "adam_epsilon": Float(1e-8),
                "max_grad_norm": Float(1.0),
                "num_train_epochs": Integer(2),
                "save_strategy": OneOf("steps", "no"),
                "torch_compile": Bool(False),
                "no_cuda": Bool(False),
                "seed": Integer(42)
            },
            "dataset": {
                "max_src_length": Integer(100),
                "max_dst_length": Integer(100),
                "ignore_pad_token_for_loss": Bool(True)
            }
        }
    }
}
| 4,378 | 37.412281 | 109 | py |
XFL | XFL-master/python/algorithm/config_descriptor/local_normalization/label_trainer.py | from common.checker.x_types import String, Bool, Integer, Float, Any
from common.checker.qualifiers import OneOf, SomeOf, RepeatableSomeOf, Required, Optional
local_normalization_rule = {
"identity": "label_trainer",
"model_info": {
"name": "local_normalization"
},
"input": {
"trainset": [
OneOf(
{
"type": "csv",
"path": String(""),
"name": String(""),
"has_id": Bool(True),
"has_label": Bool(True)
}
).set_default_index(0)
],
"valset": [
Optional(OneOf(
{
"type": "csv",
"path": String(""),
"name": String(""),
"has_id": Bool(True),
"has_label": Bool(True)
}
).set_default_index(0)
).set_default_not_none()
]
},
"output": {
"__rule__": [SomeOf("model", "proto_model")],
"path": String("/opt/checkpoints/[JOB_ID]/[NODE_ID]"),
"model": {
"name": String("local_normalization_[STAGE_ID].model")
},
"proto_model": {
"name": String("local_normalization_[STAGE_ID].pmodel")
},
"trainset": {
"name": String("normalized_train_[STAGE_ID].csv")
},
"valset": {
"name": String("normalized_val_[STAGE_ID].csv")
}
},
"train_info": {
"train_params": {
"__rule__": [Optional("feature_norm"), Required("norm", "axis")],
"norm": OneOf("l1", "l2", "max").set_default_index(0),
"axis": OneOf(0, 1).set_default(0),
"feature_norm": {
"__rule__": Optional(RepeatableSomeOf(String(""))),
String(""): {
"norm": OneOf("l1", "l2", "max").set_default_index(0)
}
}
}
}
}
| 2,050 | 30.553846 | 89 | py |
XFL | XFL-master/python/algorithm/config_descriptor/vertical_feature_selection/label_trainer.py | from common.checker.x_types import String, Bool, Integer, Float, Any
from common.checker.qualifiers import OneOf, SomeOf, RepeatableSomeOf, Required, Optional
vertical_feature_selection_label_trainer_rule = {
"identity": "label_trainer",
"model_info": {
"name": "vertical_feature_selection"
},
"input": {
"__rule__": [Optional("corr_result", "valset"), Required("trainset", "iv_result")],
"trainset": [
OneOf(
{
"type": "csv",
"path": String(""),
"name": String(""),
"has_id": Bool(True),
"has_label": Bool(True)
}
).set_default_index(0)
],
"valset": [
RepeatableSomeOf(
{
"type": "csv",
"path": String(""),
"name": String(""),
"has_id": Bool(True),
"has_label": Bool(True)
}
).set_default_indices(0)
],
"iv_result": {
"path": String(""),
"name": String("")
},
"corr_result": {
"path": String(""),
"name": String("")
}
},
"output": {
"__rule__": [Optional("valset"), Required("path", "trainset", "model")],
"path": String("/opt/checkpoints/[JOB_ID]/[NODE_ID]"),
"trainset": {
"name": String("selected_train_[STAGE_ID].csv")
},
"valset": {
"name": String("selected_val_[STAGE_ID].csv")
},
"model": {
"name": String("vertical_feature_selection_[STAGE_ID].pkl")
}
},
"train_info": {
"train_params": {
"filter": {
"__rule__": [Optional("correlation").add_rule(lambda x, y: "corr_result" in y["input"].keys()), Required("common")],
"common": {
"metrics": "iv",
"filter_method": "threshold",
"threshold": Float(0.01)
},
"correlation": {
"sort_metric": "iv",
"correlation_threshold": Float(0.7)
}
}
}
}
}
| 2,296 | 30.465753 | 132 | py |
XFL | XFL-master/python/algorithm/config_descriptor/vertical_feature_selection/sync.py | from common.checker.x_types import String, Bool, Integer, Float, Any, All
from common.checker.qualifiers import OneOf, SomeOf, RepeatableSomeOf, Required, Optional
vertical_feature_selection_sync_rule = {
"train_info": All()
}
| 232 | 32.285714 | 89 | py |
XFL | XFL-master/python/algorithm/config_descriptor/vertical_feature_selection/trainer.py | from common.checker.x_types import String, Bool, Integer, Float, Any
from common.checker.qualifiers import OneOf, SomeOf, RepeatableSomeOf, Required, Optional
vertical_feature_selection_trainer_rule = {
"identity": "trainer",
"model_info": {
"name": "vertical_feature_selection"
},
"input": {
"__rule__": [Optional("corr_result", "valset"), Required("trainset", "iv_result")],
"trainset": [
OneOf(
{
"type": "csv",
"path": String(""),
"name": String(""),
"has_id": Bool(True),
"has_label": Bool(False)
}
).set_default_index(0)
],
"valset": [
RepeatableSomeOf(
{
"type": "csv",
"path": String(""),
"name": String(""),
"has_id": Bool(True),
"has_label": Bool(False)
}
).set_default_indices(0)
],
"iv_result": {
"path": String(""),
"name": String("")
},
"corr_result": {
"path": String(""),
"name": String("")
}
},
"output": {
"__rule__": [Optional("valset"), Required("path", "trainset", "model")],
"path": String("/opt/checkpoints/[JOB_ID]/[NODE_ID]"),
"trainset": {
"name": String("selected_train_[STAGE_ID].csv")
},
"valset": {
"name": String("selected_val_[STAGE_ID].csv")
},
"model": {
"name": String("vertical_feature_selection_[STAGE_ID].pkl")
}
},
"train_info": {
}
}
| 1,751 | 29.206897 | 91 | py |
XFL | XFL-master/python/algorithm/config_descriptor/vertical_xgboost/label_trainer.py | from common.checker.x_types import String, Bool, Integer, Float, Any
from common.checker.qualifiers import OneOf, SomeOf, RepeatableSomeOf, Required, Optional
vertical_xgboost_label_trainer_rule = {
"identity": "label_trainer",
"model_info": {
"name": "vertical_xgboost"
},
"input": {
"trainset": [
OneOf(
{
"type": "csv",
"path": String(""),
"name": String(""),
"has_id": Bool(True),
"has_label": Bool(True)
}
).set_default_index(0)
],
"valset": [
RepeatableSomeOf(
{
"type": "csv",
"path": String(""),
"name": String(""),
"has_id": Bool(True),
"has_label": Bool(True)
}
).set_default_indices(0)
]
},
"output": {
"__rule__": [SomeOf("model", "proto_model")],
"path": String("/opt/checkpoints/[JOB_ID]/[NODE_ID]"),
"model": {
"name": String("vertical_xgboost_[STAGE_ID].model")
},
"proto_model": {
"name": String("vertical_xgboost_[STAGE_ID].pmodel")
},
"metric_train": {
"name": String("xgb_metric_train_[STAGE_ID].csv")
},
"metric_val": {
"name": String("xgb_metric_val_[STAGE_ID].csv")
},
"prediction_train": {
"name": String("xgb_prediction_train_[STAGE_ID].csv")
},
"prediction_val": {
"name": String("xgb_prediction_val_[STAGE_ID].csv")
},
"ks_plot_train": {
"name": String("xgb_ks_plot_train_[STAGE_ID].csv")
},
"ks_plot_val": {
"name": String("xgb_ks_plot_val[STAGE_ID].csv")
},
"decision_table_train": {
"name": String("xgb_decision_table_train_[STAGE_ID].csv")
},
"decision_table_val": {
"name": String("xgb_decision_table_val_[STAGE_ID].csv")
},
"feature_importance": {
"name": String("xgb_feature_importance_[STAGE_ID].csv")
},
"plot_ks": {
"name": "xgb_plot_ks_[STAGE_ID].json"
},
"plot_roc": {
"name": "xgb_plot_roc_[STAGE_ID].json"
},
"plot_lift": {
"name": "xgb_plot_lift_[STAGE_ID].json"
},
"plot_gain": {
"name": "xgb_plot_gain_[STAGE_ID].json"
},
"plot_precision_recall": {
"name": "xgb_plot_precision_recall_[STAGE_ID].json"
},
"plot_feature_importance": {
"name": "xgb_plot_feature_importance_[STAGE_ID].json"
},
"plot_loss": {
"name": "xgb_plot_loss_[STAGE_ID].json"
}
},
"train_info": {
"interaction_params": {
"save_frequency": Integer(-1).ge(-1),
"echo_training_metrics": Bool(True),
"write_training_prediction": Bool(True),
"write_validation_prediction": Bool(True)
},
"train_params": {
"lossfunc": {
"__rule__": OneOf("BCEWithLogitsLoss").set_default_index(0),
"BCEWithLogitsLoss": {}
},
"num_trees": Integer(30).ge(1),
"learning_rate": Float(0.3).gt(0),
"gamma": Float(0),
"lambda_": Float(1.0),
"max_depth": Integer(3).ge(1),
"num_bins": Integer(16).ge(2).le(65535),
"min_split_gain": Float(0).ge(0),
"min_sample_split": Integer(20).ge(1),
"feature_importance_type": OneOf("gain", "split").set_default_index(0),
"max_num_cores": Integer(999).ge(1),
"batch_size_val": Integer(40960).ge(1),
"downsampling": {
"column": {
"rate": Float(1.0).gt(0).le(1)
},
"row": {
"run_goss": Bool(True),
"top_rate": Float(0.4).gt(0).le(1),
"other_rate": Float(0.4).gt(0).le(1).add_rule(lambda x, y: x + y["train_info"]["train_params"]["downsampling"]["row"]["top_rate"] <= 1, "top_rate + other_rate <=1")
}
},
"category": {
"cat_smooth": Float(1.0),
"cat_features": {
"col_index": String(""),
"col_names": [Optional(RepeatableSomeOf(String("")))],
"max_num_value": Integer(0).ge(0),
"col_index_type": OneOf("inclusive", "exclusive").set_default_index(0),
"col_names_type": OneOf("inclusive", "exclusive").set_default_index(0),
"max_num_value_type": OneOf("intersection", "union").set_default_index(1)
}
},
"metric": {
"__rule__": [Optional("decision_table"), Required("acc", "precision", "recall", "f1_score", "auc", "ks")],
"acc": {},
"precision": {},
"recall": {},
"f1_score": {},
"auc": {},
"ks": {},
"decision_table": {
"method": OneOf("equal_frequency", "equal_width").set_default_index(0),
"bins": Integer(10).ge(2)
}
},
"early_stopping": {
# 这里的key必须是在metric里配置过的key
"key": OneOf("acc", "precision", "recall", "f1_score", "auc", "ks").set_default_index(-1).add_rule(lambda x, y: x in y["train_info"]["train_params"]["metric"].keys(), "should in metric"),
"patience": Integer(10).ge(-1),
"delta": Float(0.001).gt(0)
},
"encryption": {
"__rule__": OneOf("paillier", "plain").set_default_index(0),
"paillier": {
"key_bit_size": OneOf(2048, 4096, 8192).set_default_index(0),
"precision": Optional(Integer(7).ge(1)).set_default_not_none(),
"djn_on": Bool(True),
"parallelize_on": Bool(True)
},
"plain": {}
}
}
}
}
| 6,331 | 36.247059 | 203 | py |
# File: algorithm/config_descriptor/vertical_xgboost/sync.py
# Bug fix: the original imported from a bare "x_types" module, which does not
# exist at top level; every sibling descriptor imports these DSL types from
# common.checker.x_types.
from common.checker.x_types import String, Bool, Integer, Float, All
from common.checker.qualifiers import OneOf, SomeOf, RepeatableSomeOf, Required, Optional

# Sync rule for vertical XGBoost: marks which config subtrees are shared —
# presumably kept identical across parties (All() = "sync the whole subtree");
# verify against the config-sync machinery.
vertical_xgboost_sync_rule = {
    "train_info": {
        "interaction_params": All(),
        "train_params": {
            "lossfunc": All(),
            "num_trees": All(),
            "num_bins": All(),
            "batch_size_val": All(),
            "downsampling": {
                "row": {
                    "run_goss": All()
                }
            },
            "encryption": All()
        }
    }
}
# File: algorithm/config_descriptor/vertical_xgboost/trainer.py
# (first line repaired: it had dataset-extraction metadata fused into the import)
from common.checker.x_types import String, Bool, Integer, Float, Any
from common.checker.qualifiers import OneOf, SomeOf, RepeatableSomeOf, Required, Optional

# Config-validation rule for the "trainer" (non-label) party of vertical
# XGBoost.  Note the trainer's datasets carry has_label=False, unlike the
# label trainer's rule.
vertical_xgboost_trainer_rule = {
    "identity": "trainer",
    "model_info": {
        "name": "vertical_xgboost"
    },
    "input": {
        "trainset": [
            OneOf(
                {
                    "type": "csv",
                    "path": String(""),
                    "name": String(""),
                    "has_id": Bool(True),
                    "has_label": Bool(False)
                }
            ).set_default_index(0)
        ],
        "valset": [
            RepeatableSomeOf(
                {
                    "type": "csv",
                    "path": String(""),
                    "name": String(""),
                    "has_id": Bool(True),
                    "has_label": Bool(False)
                }
            ).set_default_indices(0)
        ]
    },
    "output": {
        # SomeOf: presumably at least one of "model"/"proto_model" must be
        # present — confirm against the checker's qualifier semantics.
        "__rule__": [SomeOf("model", "proto_model")],
        "path": String("/opt/checkpoints/[JOB_ID]/[NODE_ID]"),
        "model": {
            "name": String("vertical_xgboost_[STAGE_ID].model")
        },
        "proto_model": {
            "name": String("vertical_xgboost_[STAGE_ID].pmodel")
        }
    },
    "train_info": {
        "train_params": {
            "max_num_cores": Integer(999).ge(1),
            "downsampling": {
                "column": {
                    "rate": Float(1.0).gt(0).le(1)
                }
            },
            "category": {
                "cat_features": {
                    "col_index": String(""),
                    "col_names": [Optional(RepeatableSomeOf(String("")))],
                    "max_num_value": Integer(0).ge(0),
                    "col_index_type": OneOf("inclusive", "exclusive").set_default_index(0),
                    "col_names_type": OneOf("inclusive", "exclusive").set_default_index(0),
                    "max_num_value_type": OneOf("intersection", "union").set_default_index(1)
                }
            },
            "advanced": {
                "row_batch": Integer(40000).ge(1),
                "col_batch": Integer(64).ge(1)
            }
        }
    }
}
# File: algorithm/config_descriptor/vertical_logistic_regression/label_trainer.py
# (first line repaired: it had dataset-extraction metadata fused into the import)
from common.checker.x_types import String, Bool, Integer, Float, Any
from common.checker.qualifiers import OneOf, SomeOf, RepeatableSomeOf, Required, Optional

# Config-validation rule for the label-holding party of vertical logistic
# regression.  NOTE(review): the "logitstic" spelling in the default output
# file names is kept as-is — other components may look those names up.
vertical_logistic_regression_label_trainer_rule = {
    "identity": "label_trainer",
    "model_info": {
        "name": "vertical_logistic_regression"
    },
    "input": {
        "__rule__": [Optional("pretrained_model"), Required("trainset", "valset")],
        "trainset": [
            OneOf(
                {
                    "type": "csv",
                    "path": String(""),
                    "name": String(""),
                    "has_id": Bool(True),
                    "has_label": Bool(True)
                }
            ).set_default_index(0)
        ],
        "valset": [
            RepeatableSomeOf(
                {
                    "type": "csv",
                    "path": String(""),
                    "name": String(""),
                    "has_id": Bool(True),
                    "has_label": Bool(True)
                }
            ).set_default_indices(0)
        ],
        "pretrained_model": {
            "path": String(""),
            "name": String("")
        }
    },
    "output": {
        "__rule__": [SomeOf("model", "onnx_model")],
        "path": String("/opt/checkpoints/[JOB_ID]/[NODE_ID]"),
        "model": {
            "name": String("vertical_logitstic_regression_[STAGE_ID].model")
        },
        "onnx_model": {
            "name": String("vertical_logitstic_regression_[STAGE_ID].onnx")
        },
        "metric_train": {
            "name": String("lr_metric_train_[STAGE_ID].csv")
        },
        "metric_val": {
            "name": String("lr_metric_val_[STAGE_ID].csv")
        },
        "prediction_train": {
            "name": String("lr_prediction_train_[STAGE_ID].csv")
        },
        "prediction_val": {
            "name": String("lr_prediction_val_[STAGE_ID].csv")
        },
        "ks_plot_train": {
            "name": String("lr_ks_plot_train_[STAGE_ID].csv")
        },
        "ks_plot_val": {
            "name": String("lr_ks_plot_val_[STAGE_ID].csv")
        },
        "decision_table_train": {
            "name": String("lr_decision_table_train_[STAGE_ID].csv")
        },
        "decision_table_val": {
            "name": String("lr_decision_table_val_[STAGE_ID].csv")
        },
        "feature_importance": {
            "name": String("lr_feature_importance_[STAGE_ID].csv")
        },
        "plot_ks": {
            "name": String("lr_plot_ks_[STAGE_ID].json")
        },
        "plot_roc": {
            "name": String("lr_plot_roc_[STAGE_ID].json")
        },
        "plot_lift": {
            "name": String("lr_plot_lift_[STAGE_ID].json")
        },
        "plot_gain": {
            "name": String("lr_plot_gain_[STAGE_ID].json")
        },
        "plot_precision_recall": {
            "name": String("lr_plot_precision_recall_[STAGE_ID].json")
        },
        "plot_feature_importance": {
            "name": String("lr_plot_feature_importance_[STAGE_ID].json")
        },
        "plot_loss": {
            "name": String("lr_plot_loss_[STAGE_ID].json")
        }
    },
    "train_info": {
        "interaction_params": {
            "save_frequency": Integer(-1),
            "echo_training_metrics": Bool(True),
            "write_training_prediction": Bool(True),
            "write_validation_prediction": Bool(True)
        },
        "train_params": {
            "global_epoch": Integer(10),
            "batch_size": Integer(2048),
            "encryption": {
                "__rule__": OneOf("ckks", "paillier", "plain").set_default("ckks"),
                "ckks": {
                    "poly_modulus_degree": Integer(8192),
                    "coeff_mod_bit_sizes": [
                        RepeatableSomeOf(Integer()).set_default(
                            [60, 40, 40, 60])
                    ],
                    "global_scale_bit_size": Integer(40)
                },
                "paillier": {
                    "key_bit_size": OneOf(2048, 4096, 8192).set_default_index(0),
                    "precision": Optional(Integer(7).ge(1)).set_default_not_none(),
                    "djn_on": Bool(True),
                    "parallelize_on": Bool(True)
                },
                "plain": {}
            },
            "optimizer": {
                "lr": Float(0.01),
                "p": OneOf(0, 1, 2).set_default(2),
                "alpha": Float(1e-4)
            },
            "metric": {
                "__rule__": [Optional("decision_table"), Required("acc", "precision", "recall", "f1_score", "auc", "ks")],
                "decision_table": {
                    "method": OneOf("equal_frequency", "equal_width").set_default_index(0),
                    "bins": Integer(10)
                },
                "acc": {},
                "precision": {},
                "recall": {},
                "f1_score": {},
                "auc": {},
                "ks": {}
            },
            "early_stopping": {
                # The key must be one of the keys configured under "metric".
                "key": OneOf("acc", "precision", "recall", "f1_score", "auc", "ks").set_default_index(-1),
                "patience": Integer(10),
                "delta": Float(0.001)
            },
            "random_seed": Optional(Integer(50))
        }
    }
}
# File: algorithm/config_descriptor/vertical_logistic_regression/sync.py
# Bug fix: the original imported from a bare "x_types" module, which does not
# exist at top level; every sibling descriptor imports these DSL types from
# common.checker.x_types.
from common.checker.x_types import String, Bool, Integer, Float, All
from common.checker.qualifiers import OneOf, SomeOf, RepeatableSomeOf, Required, Optional

# Sync rule for vertical logistic regression: marks which config subtrees are
# shared — presumably kept identical across parties (All() = whole subtree);
# verify against the config-sync machinery.
vertical_logistic_regression_sync_rule = {
    "train_info": {
        "interaction_params": All(),
        "train_params": {
            "global_epoch": All(),
            "batch_size": All(),
            "encryption": All(),
            "optimizer": All(),
            "random_seed": All()
        }
    }
}
# File: algorithm/config_descriptor/vertical_logistic_regression/trainer.py
# (first line repaired: it had dataset-extraction metadata fused into the import)
from common.checker.x_types import String, Bool, Integer, Float, Any
from common.checker.qualifiers import OneOf, SomeOf, RepeatableSomeOf, Required, Optional

# Config-validation rule for the "trainer" (non-label) party of vertical
# logistic regression.  Datasets carry has_label=False; train_info is empty
# because all training parameters are synced from the label trainer.
# NOTE(review): "logitstic" in the output file names is kept as-is — other
# components may look those names up.
vertical_logistic_regression_trainer_rule = {
    "identity": "trainer",
    "model_info": {
        "name": "vertical_logistic_regression"
    },
    "input": {
        "__rule__": [Optional("pretrained_model"), Required("trainset", "valset")],
        "trainset": [
            OneOf(
                {
                    "type": "csv",
                    "path": String(""),
                    "name": String(""),
                    "has_id": Bool(True),
                    "has_label": Bool(False)
                }
            ).set_default_index(0)
        ],
        "valset": [
            RepeatableSomeOf(
                {
                    "type": "csv",
                    "path": String(""),
                    "name": String(""),
                    "has_id": Bool(True),
                    "has_label": Bool(False)
                }
            ).set_default_indices(0)
        ],
        "pretrained_model": {
            "path": String(""),
            "name": String("")
        }
    },
    "output": {
        "__rule__": [SomeOf("model", "onnx_model")],
        "path": String("/opt/checkpoints/[JOB_ID]/[NODE_ID]"),
        "model": {
            "name": String("vertical_logitstic_regression_[STAGE_ID].model")
        },
        "onnx_model": {
            "name": String("vertical_logitstic_regression_[STAGE_ID].onnx")
        },
    },
    "train_info": {
    }
}
# File: algorithm/config_descriptor/local_feature_preprocess/label_trainer.py
# (first line repaired: it had dataset-extraction metadata fused into the import)
from common.checker.x_types import String, Bool, Integer, Float, Any
from common.checker.qualifiers import OneOf, SomeOf, RepeatableSomeOf, Required, Optional

# Config-validation rule for local feature preprocessing (missing-value
# imputation, outlier replacement and one-hot encoding).  Per-feature blocks
# keyed by String("") override the global settings for named columns.
local_feature_preprocess_rule = {
    "identity": "label_trainer",
    "model_info": {
        "name": "local_feature_preprocess"
    },
    "input": {
        "__rule__": [Required("trainset"), Optional("valset")],
        "trainset": [
            OneOf(
                {
                    "type": "csv",
                    "path": String(""),
                    "name": String(""),
                    "has_id": Bool(True),
                    "has_label": Bool(True)
                }
            ).set_default_index(0)
        ],
        "valset": [
            Optional(OneOf(
                {
                    "type": "csv",
                    "path": String(""),
                    "name": String(""),
                    "has_id": Bool(True),
                    "has_label": Bool(False)
                }
            ).set_default_index(0)
            ).set_default_not_none()
        ]
    },
    "output": {
        "__rule__": [Required("path", "trainset", "model"), Optional("valset")],
        "path": String("/opt/checkpoints/[JOB_ID]/[NODE_ID]"),
        "model": {
            "name": String("local_feature_preprocess_[STAGE_ID].pt")
        },
        "trainset": {
            "name": String("preprocessed_train_[STAGE_ID].csv")
        },
        "valset": {
            "name": String("preprocessed_val_[STAGE_ID].csv")
        }
    },
    "train_info": {
        "train_params":
        {
            "missing":
            {
                "missing_values": OneOf(Any(None), [Any(None)]).set_default_index(0),  # scalar or list
                "strategy": OneOf("mean", "median", "constant", "most_frequent").set_default_index(0),
                "fill_value": Any(None),
                # Per-column overrides; keys are feature names.
                "missing_features": {
                    "__rule__": Optional(RepeatableSomeOf(String(""))),
                    String(""):
                    {
                        "missing_values": OneOf(Any(None), [Any(None)]).set_default_index(0),
                        "strategy": OneOf("mean", "median", "constant", "most_frequent").set_default_index(0),
                        "fill_value": Any(None)
                    },
                }
            },
            "outlier":
            {
                "outlier_values": OneOf(Any(None), [Any(None)]).set_default_index(0),
                # Per-column overrides; keys are feature names.
                "outlier_features": {
                    "__rule__": Optional(RepeatableSomeOf(String(""))),
                    String(""):
                    {
                        "outlier_values": OneOf(Any(None), [Any(None)]).set_default_index(0)
                    },
                }
            },
            "onehot":
            {
                # Columns to one-hot encode; keys are feature names.
                "onehot_features": {
                    "__rule__": Optional(RepeatableSomeOf(String(""))),
                    String(""): {}
                }
            }
        }
    }
}
# File: algorithm/config_descriptor/horizontal_binning_woe_iv/label_trainer.py
# (first line repaired: it had dataset-extraction metadata fused into the import)
from common.checker.x_types import String, Bool, Integer, Float, Any
from common.checker.qualifiers import OneOf, SomeOf, RepeatableSomeOf, Required, Optional

# Config-validation rule for the label trainer of horizontal WOE/IV binning.
horizontal_binning_woe_iv_label_trainer_rule = {
    "identity": "label_trainer",
    "model_info": {
        "name": "horizontal_binning_woe_iv"
    },
    "input": {
        "trainset": [
            OneOf(
                {
                    "type": "csv",
                    "path": String(),
                    "name": String(),
                    "has_label": Bool(True),
                    "has_id": Bool(True)
                }
            ).set_default_index(0)
        ]
    },
    "train_info": {
        "train_params": {
            "binning": {
                "method": OneOf("equal_width").set_default_index(0),
                "bins": Integer(5)
            }
        }
    }
}
# File: algorithm/config_descriptor/horizontal_binning_woe_iv/assist_trainer.py
# (first line repaired: it had dataset-extraction metadata fused into the import)
from common.checker.x_types import String, Bool, Integer, Float, Any
from common.checker.qualifiers import OneOf, SomeOf, RepeatableSomeOf, Required, Optional
from common.utils.auto_descriptor.torch.optimizer import optimizer
from common.utils.auto_descriptor.torch.lr_scheduler import lr_scheduler
from common.utils.auto_descriptor.torch.lossfunc import lossfunc
from common.utils.auto_descriptor.torch.metrics import metrics
from common.utils.utils import update_dict
from algorithm.core.metrics import metric_dict

# Config-validation rule for the assist trainer of horizontal WOE/IV binning.
# Encryption is either one-time-pad ("otp", the default) or plaintext.
horizontal_binning_woe_iv_assist_trainer_rule = {
    "identity": "assist_trainer",
    "model_info": {
        "name": "horizontal_binning_woe_iv"
    },
    "output": {
        "path": String("/opt/checkpoints/[JOB_ID]/[NODE_ID]"),
        "result": {
            "name": String("woe_iv_result_[STAGE_ID].json")
        }
    },
    "train_info": {
        "train_params": {
            "encryption": {
                "__rule__": OneOf("otp", "plain").set_default("otp"),
                "otp": {
                    "key_bitlength": OneOf(64, 128).set_default(64),
                    "data_type": "numpy.ndarray",
                    "key_exchange": {
                        "key_bitlength": OneOf(3072, 4096, 6144, 8192),
                        "optimized": Bool(True)
                    },
                    "csprng": {
                        "name": OneOf("hmac_drbg").set_default("hmac_drbg"),
                        "method": OneOf("sha1", "sha224", "sha256", "sha384", "sha512").set_default("sha256")
                    }
                },
                "plain": {}
            },
        }
    }
}
# File: XFL-master/python/algorithm/core/activation.py
# Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
def sigmoid(x: np.ndarray) -> np.ndarray:
    """Element-wise logistic function 1 / (1 + e^(-x)).

    Uses the branch-stable form: for x < 0 compute e^x / (1 + e^x), so that
    the value actually *selected* never feeds a large positive argument to
    exp.  np.where still evaluates both branches, so the discarded branch can
    overflow to inf / produce nan; those intermediate results are thrown away,
    but they used to emit spurious RuntimeWarnings — suppress them locally.

    Args:
        x: input array (any shape).

    Returns:
        Array of the same shape with values in [0, 1].
    """
    with np.errstate(over="ignore", invalid="ignore"):
        res = np.where(x < 0, np.exp(x)/(1 + np.exp(x)), 1/(1 + np.exp(-x)))
    return res
| 743 | 32.818182 | 74 | py |
# File: XFL-master/python/algorithm/core/encryption_param.py
# Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional, Union, Dict, Any
from common.utils.constants import CKKS, PAILLIER, PLAIN, OTP
# used by xgboost
class EncryptionParam(object):
    """Empty marker base class for encryption parameter holders (per the
    comment above, used by the XGBoost code)."""
    pass
class PaillierParam(object):
    """Parameter holder for Paillier homomorphic encryption.

    Attributes mirror the constructor arguments; ``method`` is fixed to the
    ``PAILLIER`` constant.
    """

    def __init__(self,
                 key_bit_size: int = 2048,
                 precision: Optional[int] = 7,
                 djn_on: bool = True,
                 parallelize_on: bool = False):
        # key_bit_size: Paillier key length in bits.
        # precision: presumably the fixed-point precision used when encoding
        #   floats — confirm against the Paillier implementation.
        # djn_on: presumably toggles the DJN encryption optimization — verify.
        # parallelize_on: toggles parallel encryption/decryption.
        self.method = PAILLIER
        self.key_bit_size = key_bit_size
        self.precision = precision
        self.djn_on = djn_on
        self.parallelize_on = parallelize_on
class CKKSParam(object):
    """Parameter holder for CKKS homomorphic encryption.

    Args:
        poly_modulus_degree: polynomial modulus degree of the CKKS scheme.
        coeff_mod_bit_sizes: bit sizes of the coefficient modulus chain
            (defaults to [60, 40, 40, 60]).
        global_scale_bit_size: bit size n of the global scale 2**n.
    """

    def __init__(self,
                 poly_modulus_degree: int = 8192,
                 coeff_mod_bit_sizes: Optional[List[int]] = None,
                 global_scale_bit_size: int = 40):
        self.method = CKKS
        # Avoid the shared-mutable-default-list pitfall; the effective default
        # is unchanged.
        if coeff_mod_bit_sizes is None:
            coeff_mod_bit_sizes = [60, 40, 40, 60]
        # Bug fix: the attribute was only stored under the misspelled name
        # "poly_modulus_degress".  Store the correct spelling and keep the old
        # name as an alias so any existing readers do not break.
        self.poly_modulus_degree = poly_modulus_degree
        self.poly_modulus_degress = poly_modulus_degree
        self.coeff_mod_bit_sizes = coeff_mod_bit_sizes
        self.global_scale_bit_size = global_scale_bit_size
class OTPParam(object):
    """Parameter holder for one-time-pad (OTP) encryption.

    Args:
        key_bitlength: OTP key length in bits.
        data_type: name of the payload container type (e.g. "torch.Tensor").
        key_exchange: key-exchange sub-config (may be None).
        csprng: CSPRNG sub-config (may be None).
    """

    def __init__(self,
                 key_bitlength: int = 64,
                 data_type: str = "torch.Tensor",
                 key_exchange: Optional[Dict[str, Any]] = None,
                 csprng: Optional[Dict[str, Any]] = None):
        self.method = OTP
        self.key_bitlength = key_bitlength
        # Bug fix: the attribute was only stored under the misspelled name
        # "data_tyep".  Store the correct spelling and keep the old name as an
        # alias so any existing readers do not break.
        self.data_type = data_type
        self.data_tyep = data_type
        self.key_exchange = key_exchange
        self.csprng = csprng
class PlainParam(object):
    """Parameter holder for plain (no-encryption) mode; carries only ``method``."""
    def __init__(self):
        self.method = PLAIN
def get_encryption_param(method: str, params: Optional[dict] = None) -> Union[PlainParam, PaillierParam, CKKSParam, OTPParam]:
    """Factory mapping an encryption method name to its parameter object.

    The return annotation previously listed the string constants PAILLIER/CKKS
    instead of the parameter classes and omitted OTPParam; fixed.

    Args:
        method: one of the PLAIN / PAILLIER / CKKS / OTP constants.
        params: keyword arguments forwarded to the parameter class.  May be
            None (treated as empty) — previously None crashed with a TypeError
            for every method other than PLAIN.

    Raises:
        ValueError: if ``method`` is not a supported encryption method.
    """
    kwargs = params or {}
    if method == PLAIN:
        return PlainParam()
    elif method == PAILLIER:
        return PaillierParam(**kwargs)
    elif method == CKKS:
        return CKKSParam(**kwargs)
    elif method == OTP:
        return OTPParam(**kwargs)
    else:
        raise ValueError(f"Encryption method {method} not supported.")
| 2,593 | 31.425 | 106 | py |
# File: XFL-master/python/algorithm/core/paillier_acceleration.py
# Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
import numpy as np
def embed(p_list: List[np.ndarray], interval: int = (1 << 128), precision: int = 64):
    """Pack the i-th elements of several float arrays into one big integer.

    Each value is scaled to a fixed-point integer with ``precision``
    fractional bits, and consecutive values are stacked in base ``interval``
    (first array = most significant digit).  Returns one packed integer per
    element position, wrapped in a numpy array.
    """
    scale = 1 << precision
    packed = []
    for pos in range(len(p_list[0])):
        # Horner-style accumulation: shift by one base-`interval` digit, then
        # add the next fixed-point value.
        acc = int(p_list[0][pos] * scale)
        for arr in p_list[1:]:
            acc = acc * interval + int(arr[pos] * scale)
        packed.append(acc)
    return np.array(packed)
def umbed(a: np.ndarray, num: int, interval: int = (1 << 128), precison: int = 64) -> List[list]:
    """Inverse of ``embed``: split each packed integer back into ``num`` floats.

    Digits are extracted in base ``interval`` with centering (a remainder
    whose magnitude exceeds interval/2 is mapped to its negative counterpart,
    so negative plaintexts survive the round trip), then rescaled by
    2**precison.  Returns ``num`` lists — one per original plaintext array —
    each of length ``len(a)``, holding float32 values.
    (``precison``/``umbed`` spellings kept for interface compatibility.)
    """
    scale = 1 << precison
    half = interval // 2

    def split_one(packed):
        # Extract digits from the least significant slot to the most.
        parts = [0] * num
        rest = packed
        for slot in range(num - 1, -1, -1):
            digit = rest % interval
            if abs(digit) > half:
                digit -= interval
            rest = (rest - digit) // interval
            parts[slot] = digit / scale
        return np.array(parts).astype(np.float32)

    columns = [split_one(value) for value in a]
    # Transpose: group the j-th digit of every packed value together.
    return [[column[j] for column in columns] for j in range(num)]
def unpack(x: float, num: int, interval: int = (1 << 128), precison: int = 64) -> List[float]:
    """Split one packed integer (see ``embed``) into ``num`` Python floats.

    Same centered base-``interval`` digit extraction as ``umbed``, but for a
    single packed value, and returning plain floats instead of float32.

    Bug fix: the return annotation said ``List[list]`` although the function
    returns a flat list of floats; corrected to ``List[float]``.

    Args:
        x: packed value — despite the ``float`` annotation (kept for
            interface compatibility), callers presumably pass the big
            integers produced by embedding; confirm at call sites.
        num: number of plaintext values packed into ``x``.
        interval: base used for packing.
        precison: fixed-point fractional bit count (sic — name kept).
    """
    scale = 1 << precison
    half = interval // 2
    res = [0] * num
    rest = x
    for slot in range(num - 1, -1, -1):
        digit = rest % interval
        # Center the remainder so negative plaintexts decode correctly.
        if abs(digit) > half:
            digit -= interval
        rest = (rest - digit) // interval
        res[slot] = float(digit / scale)
    return res
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.