diff --git a/nvflare/app_opt/xgboost/histogram_based/executor.py b/nvflare/app_opt/xgboost/histogram_based/executor.py
index d9233974c9..1af46ba949 100644
--- a/nvflare/app_opt/xgboost/histogram_based/executor.py
+++ b/nvflare/app_opt/xgboost/histogram_based/executor.py
@@ -37,8 +37,8 @@ def __init__(self, xgb_params: dict, num_rounds=10, early_stopping_rounds=2, ver
         """Container for all XGBoost parameters.

         Args:
-            xgb_params: This dict is passed to `xgboost.train()` as the first argument `params`.
-                It contains all the Booster parameters.
+            xgb_params: The Booster parameters. This dict is passed to `xgboost.train()`
+                as the argument `params`. It contains all the Booster parameters.
             Please refer to XGBoost documentation for details:
             https://xgboost.readthedocs.io/en/stable/python/python_api.html#module-xgboost.training
         """
diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/grpc_client_adaptor.py b/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/grpc_client_adaptor.py
index e5fb71d0f5..1105bd9b91 100644
--- a/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/grpc_client_adaptor.py
+++ b/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/grpc_client_adaptor.py
@@ -73,6 +73,9 @@ class since the self object contains a sender that contains a Core Cell which ca
             Constant.RUNNER_CTX_SERVER_ADDR: server_addr,
             Constant.RUNNER_CTX_RANK: self.rank,
             Constant.RUNNER_CTX_NUM_ROUNDS: self.num_rounds,
+            Constant.RUNNER_CTX_TRAINING_MODE: self.training_mode,
+            Constant.RUNNER_CTX_XGB_PARAMS: self.xgb_params,
+            Constant.RUNNER_CTX_XGB_OPTIONS: self.xgb_options,
             Constant.RUNNER_CTX_MODEL_DIR: self._run_dir,
             Constant.RUNNER_CTX_TB_DIR: self._app_dir,
         }
@@ -96,9 +99,6 @@ def start(self, fl_ctx: FLContext):
         if self.rank is None:
             raise RuntimeError("cannot start - my rank is not set")

-        if not self.num_rounds:
-            raise RuntimeError("cannot start - num_rounds is not set")
-
         # dynamically determine address on localhost
         port = get_open_tcp_port(resources={})
         if not port:
diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/xgb_adaptor.py b/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/xgb_adaptor.py
index d0ec51de7c..e1c6f05134 100644
--- a/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/xgb_adaptor.py
+++ b/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/xgb_adaptor.py
@@ -138,6 +138,9 @@ def __init__(self, in_process, per_msg_timeout: float, tx_timeout: float):
         self.stopped = False
         self.rank = None
         self.num_rounds = None
+        self.training_mode = None
+        self.xgb_params = None
+        self.xgb_options = None
         self.world_size = None
         self.per_msg_timeout = per_msg_timeout
         self.tx_timeout = tx_timeout
@@ -163,27 +166,36 @@ def configure(self, config: dict, fl_ctx: FLContext):
         Returns: None

         """
-        ws = config.get(Constant.CONF_KEY_WORLD_SIZE)
+        ranks = config.get(Constant.CONF_KEY_CLIENT_RANKS)
+        ws = len(ranks)
         if not ws:
             raise RuntimeError("world_size is not configured")
-
-        check_positive_int(Constant.CONF_KEY_WORLD_SIZE, ws)
         self.world_size = ws

-        rank = config.get(Constant.CONF_KEY_RANK)
+        me = fl_ctx.get_identity_name()
+        rank = ranks.get(me)
         if rank is None:
             raise RuntimeError("rank is not configured")
-        check_non_negative_int(Constant.CONF_KEY_RANK, rank)
         self.rank = rank

         num_rounds = config.get(Constant.CONF_KEY_NUM_ROUNDS)
-        if num_rounds is None:
-            raise RuntimeError("num_rounds is not configured")
+        if num_rounds is None or num_rounds <= 0:
+            raise RuntimeError("num_rounds is not configured or invalid value")
         check_positive_int(Constant.CONF_KEY_NUM_ROUNDS, num_rounds)
         self.num_rounds = num_rounds

+        self.training_mode = config.get(Constant.CONF_KEY_TRAINING_MODE)
+        if self.training_mode is None:
+            raise RuntimeError("training_mode is not configured")
+
+        self.xgb_params = config.get(Constant.CONF_KEY_XGB_PARAMS)
+        if not self.xgb_params:
+            raise RuntimeError("xgb_params is not configured")
+
+        self.xgb_options = config.get(Constant.CONF_KEY_XGB_OPTIONS, {})
+
    def _send_request(self, op: str, req: Shareable) -> (bytes, Shareable):
        """Send XGB operation request to the FL server via FLARE message.
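With this change the client adaptor is configured directly from the controller's config Shareable instead of a pre-digested rank/world-size pair. A rough sketch of the payload `configure()` now consumes, assuming two clients; the site names and parameter values below are illustrative only, not defaults from this change.

```python
# Illustrative only: the shape of the config dict that the XGB adaptor's configure()
# reads after this change. Site names and parameter values are made up.
from nvflare.app_opt.xgboost.histogram_based_v2.defs import Constant

example_config = {
    Constant.CONF_KEY_CLIENT_RANKS: {"site-1": 0, "site-2": 1},  # world size = len(ranks)
    Constant.CONF_KEY_NUM_ROUNDS: 10,
    Constant.CONF_KEY_TRAINING_MODE: "vertical",
    Constant.CONF_KEY_XGB_PARAMS: {"max_depth": 6, "objective": "binary:logistic"},
    Constant.CONF_KEY_XGB_OPTIONS: {"early_stopping_rounds": 2, "verbose_eval": False},
}

# adaptor.configure(example_config, fl_ctx) then derives this client's rank from the
# ranks dict via fl_ctx.get_identity_name() and the world size from len(ranks).
```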
diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/controller.py b/nvflare/app_opt/xgboost/histogram_based_v2/controller.py
index 39370b7033..eec1d02b29 100644
--- a/nvflare/app_opt/xgboost/histogram_based_v2/controller.py
+++ b/nvflare/app_opt/xgboost/histogram_based_v2/controller.py
@@ -25,7 +25,7 @@
 from nvflare.fuel.utils.validation_utils import check_number_range, check_object_type, check_positive_number, check_str
 from nvflare.security.logging import secure_format_exception

-from .defs import Constant
+from .defs import TRAINING_MODE_MAPPING, Constant


 class ClientStatus:
@@ -57,6 +57,9 @@ def __init__(
         self,
         adaptor_component_id: str,
         num_rounds: int,
+        training_mode: str,
+        xgb_params: dict,
+        xgb_options: dict,
         configure_task_name=Constant.CONFIG_TASK_NAME,
         configure_task_timeout=Constant.CONFIG_TASK_TIMEOUT,
         start_task_name=Constant.START_TASK_NAME,
@@ -69,9 +72,15 @@ def __init__(
         """
         Constructor

+        For the meaning of the XGBoost parameters, please refer to the documentation of the train API:
+        https://xgboost.readthedocs.io/en/stable/python/python_api.html#xgboost.train
+
         Args:
             adaptor_component_id - the component ID of server target adaptor
             num_rounds - number of rounds
+            training_mode - split mode (horizontal, vertical, horizontal_secure, vertical_secure)
+            xgb_params - the `params` argument passed to the train method
+            xgb_options - all other arguments of the train method are passed through this dictionary
             configure_task_name - name of the config task
             configure_task_timeout - time to wait for clients' responses to the config task before timeout.
             start_task_name - name of the start task
@@ -89,6 +98,9 @@ def __init__(
         Controller.__init__(self)
         self.adaptor_component_id = adaptor_component_id
         self.num_rounds = num_rounds
+        self.training_mode = training_mode.lower()
+        self.xgb_params = xgb_params
+        self.xgb_options = xgb_options
         self.configure_task_name = configure_task_name
         self.start_task_name = start_task_name
         self.start_task_timeout = start_task_timeout
@@ -104,6 +116,17 @@ def __init__(
         self.client_statuses = {}  # client name => ClientStatus
         self.abort_signal = None

+        check_str("training_mode", training_mode)
+        valid_mode = TRAINING_MODE_MAPPING.keys()
+        if self.training_mode not in valid_mode:
+            raise ValueError(f"training_mode must be one of the following values: {valid_mode}")
+
+        if not self.xgb_params:
+            raise ValueError("xgb_params can't be empty")
+
+        if not self.xgb_options:
+            self.xgb_options = {}
+
         check_str("adaptor_component_id", adaptor_component_id)
         check_number_range("configure_task_timeout", configure_task_timeout, min_value=1)
         check_number_range("start_task_timeout", start_task_timeout, min_value=1)
@@ -427,6 +450,9 @@ def _configure_clients(self, abort_signal: Signal, fl_ctx: FLContext):
         shareable[Constant.CONF_KEY_CLIENT_RANKS] = self.client_ranks
         shareable[Constant.CONF_KEY_NUM_ROUNDS] = self.num_rounds
+        shareable[Constant.CONF_KEY_TRAINING_MODE] = self.training_mode
+        shareable[Constant.CONF_KEY_XGB_PARAMS] = self.xgb_params
+        shareable[Constant.CONF_KEY_XGB_OPTIONS] = self.xgb_options

         task = Task(
             name=self.configure_task_name,
diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/defs.py b/nvflare/app_opt/xgboost/histogram_based_v2/defs.py
index 864f4cebc7..b559306440 100644
--- a/nvflare/app_opt/xgboost/histogram_based_v2/defs.py
+++ b/nvflare/app_opt/xgboost/histogram_based_v2/defs.py
@@ -26,9 +26,12 @@ class Constant:
     CONF_KEY_RANK = "rank"
     CONF_KEY_WORLD_SIZE = "world_size"
     CONF_KEY_NUM_ROUNDS = "num_rounds"
+    CONF_KEY_TRAINING_MODE = "training_mode"
+    CONF_KEY_XGB_PARAMS = "xgb_params"
+    CONF_KEY_XGB_OPTIONS = "xgb_options"

     # default component config values
-    CONFIG_TASK_TIMEOUT = 10
+    CONFIG_TASK_TIMEOUT = 20
     START_TASK_TIMEOUT = 10

     XGB_SERVER_READY_TIMEOUT = 10.0
@@ -88,6 +91,9 @@ class Constant:
     RUNNER_CTX_PORT = "port"
     RUNNER_CTX_CLIENT_NAME = "client_name"
     RUNNER_CTX_NUM_ROUNDS = "num_rounds"
+    RUNNER_CTX_TRAINING_MODE = "training_mode"
+    RUNNER_CTX_XGB_PARAMS = "xgb_params"
+    RUNNER_CTX_XGB_OPTIONS = "xgb_options"
     RUNNER_CTX_WORLD_SIZE = "world_size"
     RUNNER_CTX_RANK = "rank"
     RUNNER_CTX_DATA_LOADER = "data_loader"
@@ -111,3 +117,25 @@ class Constant:
     ("grpc.max_send_message_length", MAX_FRAME_SIZE),
     ("grpc.max_receive_message_length", MAX_FRAME_SIZE),
 ]
+
+
+class SplitMode:
+    ROW = 0
+    COL = 1
+    COL_SECURE = 2
+    ROW_SECURE = 3
+
+
+# Mapping of text training mode to split mode
+TRAINING_MODE_MAPPING = {
+    "h": SplitMode.ROW,
+    "horizontal": SplitMode.ROW,
+    "v": SplitMode.COL,
+    "vertical": SplitMode.COL,
+    "hs": SplitMode.ROW_SECURE,
+    "horizontal_secure": SplitMode.ROW_SECURE,
+    "vs": SplitMode.COL_SECURE,
+    "vertical_secure": SplitMode.COL_SECURE,
+}
+
+SECURE_TRAINING_MODES = {"hs", "horizontal_secure", "vs", "vertical_secure"}
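The mapping above ties the user-facing `training_mode` strings to numeric split modes. A minimal sketch of the lookup, derived only from the definitions added in `defs.py`; how the resulting integer is ultimately consumed (for example as a DMatrix split mode or by the secure plugin) is not shown in this diff.

```python
# A minimal sketch of resolving a configured training_mode, based only on the
# TRAINING_MODE_MAPPING / SECURE_TRAINING_MODES definitions added above.
from nvflare.app_opt.xgboost.histogram_based_v2.defs import (
    SECURE_TRAINING_MODES,
    SplitMode,
    TRAINING_MODE_MAPPING,
)


def resolve_split_mode(training_mode: str) -> int:
    mode = training_mode.lower()
    if mode not in TRAINING_MODE_MAPPING:
        raise ValueError(f"unknown training_mode: {training_mode}")
    return TRAINING_MODE_MAPPING[mode]


assert resolve_split_mode("vertical") == SplitMode.COL    # column (feature) split
assert resolve_split_mode("hs") == SplitMode.ROW_SECURE   # secure horizontal
assert "hs" in SECURE_TRAINING_MODES                      # secure modes enable the encryption plugin path
```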
diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/executor.py b/nvflare/app_opt/xgboost/histogram_based_v2/executor.py
index dc870ff094..4324dc87dc 100644
--- a/nvflare/app_opt/xgboost/histogram_based_v2/executor.py
+++ b/nvflare/app_opt/xgboost/histogram_based_v2/executor.py
@@ -115,15 +115,9 @@ def execute(self, task_name: str, shareable: Shareable, fl_ctx: FLContext, abort
                 self.log_error(fl_ctx, f"missing {Constant.CONF_KEY_NUM_ROUNDS} from config")
                 return make_reply(ReturnCode.BAD_TASK_DATA)

-            world_size = len(ranks)
-
             # configure the XGB client target via the adaptor
             self.adaptor.configure(
-                {
-                    Constant.CONF_KEY_RANK: my_rank,
-                    Constant.CONF_KEY_NUM_ROUNDS: num_rounds,
-                    Constant.CONF_KEY_WORLD_SIZE: world_size,
-                },
+                shareable,
                 fl_ctx,
             )
             return make_reply(ReturnCode.OK)
diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/fed_controller.py b/nvflare/app_opt/xgboost/histogram_based_v2/fed_controller.py
index 494605ff9d..a63bcb7061 100644
--- a/nvflare/app_opt/xgboost/histogram_based_v2/fed_controller.py
+++ b/nvflare/app_opt/xgboost/histogram_based_v2/fed_controller.py
@@ -11,6 +11,8 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import uuid
+from typing import Optional

 from nvflare.apis.fl_context import FLContext
 from nvflare.app_opt.xgboost.histogram_based_v2.adaptors.grpc_server_adaptor import GrpcServerAdaptor
@@ -18,12 +20,16 @@

 from .controller import XGBController
 from .defs import Constant
+from .sec.server_handler import ServerSecurityHandler


 class XGBFedController(XGBController):
     def __init__(
         self,
         num_rounds: int,
+        training_mode: str,
+        xgb_params: dict,
+        xgb_options: Optional[dict] = None,
         configure_task_name=Constant.CONFIG_TASK_NAME,
         configure_task_timeout=Constant.CONFIG_TASK_TIMEOUT,
         start_task_name=Constant.START_TASK_NAME,
@@ -39,6 +45,9 @@ def __init__(
             self,
             adaptor_component_id="",
             num_rounds=num_rounds,
+            training_mode=training_mode,
+            xgb_params=xgb_params,
+            xgb_options=xgb_options,
             configure_task_name=configure_task_name,
             configure_task_timeout=configure_task_timeout,
             start_task_name=start_task_name,
@@ -52,6 +61,11 @@ def __init__(
         self.in_process = in_process

     def get_adaptor(self, fl_ctx: FLContext):
+
+        engine = fl_ctx.get_engine()
+        handler = ServerSecurityHandler()
+        engine.add_component(str(uuid.uuid4()), handler)
+
         runner = XGBServerRunner()
         runner.initialize(fl_ctx)
         adaptor = GrpcServerAdaptor(
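With the new constructor, the Booster parameters and the remaining `train()` arguments are declared once on the server-side controller and distributed to clients through the config task. A hedged sketch of how the controller might be set up; all values below are illustrative, not defaults introduced by this change.

```python
# Hypothetical server-side configuration of the updated controller.
from nvflare.app_opt.xgboost.histogram_based_v2.fed_controller import XGBFedController

controller = XGBFedController(
    num_rounds=100,
    training_mode="horizontal_secure",  # any key of TRAINING_MODE_MAPPING
    xgb_params={                        # forwarded to xgboost.train() as `params`
        "max_depth": 8,
        "eta": 0.1,
        "objective": "binary:logistic",
        "eval_metric": "auc",
        "tree_method": "hist",
    },
    xgb_options={                       # everything else the client runner reads from xgb_options
        "early_stopping_rounds": 2,
        "verbose_eval": False,
        "use_gpus": False,
    },
)
```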
diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/fed_executor.py b/nvflare/app_opt/xgboost/histogram_based_v2/fed_executor.py
index 168d8d328a..8d6aa5dc34 100644
--- a/nvflare/app_opt/xgboost/histogram_based_v2/fed_executor.py
+++ b/nvflare/app_opt/xgboost/histogram_based_v2/fed_executor.py
@@ -11,18 +11,19 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import uuid
+
 from nvflare.apis.fl_context import FLContext
 from nvflare.app_opt.xgboost.histogram_based_v2.adaptors.grpc_client_adaptor import GrpcClientAdaptor
 from nvflare.app_opt.xgboost.histogram_based_v2.runners.xgb_client_runner import XGBClientRunner

 from .executor import XGBExecutor
+from .sec.client_handler import ClientSecurityHandler


 class FedXGBHistogramExecutor(XGBExecutor):
     def __init__(
         self,
-        early_stopping_rounds,
-        xgb_params: dict,
         data_loader_id: str,
         verbose_eval=False,
         use_gpus=False,
@@ -39,8 +40,6 @@ def __init__(
             per_msg_timeout=per_msg_timeout,
             tx_timeout=tx_timeout,
         )
-        self.early_stopping_rounds = early_stopping_rounds
-        self.xgb_params = xgb_params
         self.data_loader_id = data_loader_id
         self.verbose_eval = verbose_eval
         self.use_gpus = use_gpus
@@ -50,12 +49,13 @@ def __init__(
         self.in_process = in_process

     def get_adaptor(self, fl_ctx: FLContext):
+
+        engine = fl_ctx.get_engine()
+        handler = ClientSecurityHandler()
+        engine.add_component(str(uuid.uuid4()), handler)
+
         runner = XGBClientRunner(
             data_loader_id=self.data_loader_id,
-            early_stopping_rounds=self.early_stopping_rounds,
-            xgb_params=self.xgb_params,
-            verbose_eval=self.verbose_eval,
-            use_gpus=self.use_gpus,
             model_file_name=self.model_file_name,
             metrics_writer_id=self.metrics_writer_id,
         )
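On the client side the executor no longer takes `early_stopping_rounds` or `xgb_params`; those now arrive from the controller via the config task. A minimal sketch of the slimmed-down client setup; the component id string is an assumption for illustration.

```python
# Hypothetical client-side setup after this change; "dataloader" is an assumed
# component id that must match a configured XGBDataLoader component.
from nvflare.app_opt.xgboost.histogram_based_v2.fed_executor import FedXGBHistogramExecutor

executor = FedXGBHistogramExecutor(data_loader_id="dataloader")
```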
diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/runners/xgb_client_runner.py b/nvflare/app_opt/xgboost/histogram_based_v2/runners/xgb_client_runner.py
index 35627bc3cd..1b98829711 100644
--- a/nvflare/app_opt/xgboost/histogram_based_v2/runners/xgb_client_runner.py
+++ b/nvflare/app_opt/xgboost/histogram_based_v2/runners/xgb_client_runner.py
@@ -17,34 +17,30 @@
 from xgboost import callback

 from nvflare.apis.fl_component import FLComponent
+from nvflare.apis.fl_constant import SystemConfigs
 from nvflare.apis.fl_context import FLContext
 from nvflare.app_common.tracking.log_writer import LogWriter
 from nvflare.app_opt.xgboost.data_loader import XGBDataLoader
-from nvflare.app_opt.xgboost.histogram_based_v2.defs import Constant
+from nvflare.app_opt.xgboost.histogram_based_v2.defs import SECURE_TRAINING_MODES, Constant
 from nvflare.app_opt.xgboost.histogram_based_v2.runners.xgb_runner import AppRunner
 from nvflare.app_opt.xgboost.histogram_based_v2.tb import TensorBoardCallback
-from nvflare.app_opt.xgboost.histogram_based_v2.xgb_params import XGBoostParams
 from nvflare.app_opt.xgboost.metrics_cb import MetricsCallback
+from nvflare.fuel.utils.config_service import ConfigService
 from nvflare.fuel.utils.import_utils import optional_import
 from nvflare.fuel.utils.obj_utils import get_logger
+from nvflare.utils.cli_utils import get_package_root
+
+LOADER_PARAMS_LIBRARY_PATH = "LIBRARY_PATH"


 class XGBClientRunner(AppRunner, FLComponent):
     def __init__(
         self,
         data_loader_id: str,
-        early_stopping_rounds: int,
-        xgb_params: dict,
-        verbose_eval,
-        use_gpus,
         model_file_name,
         metrics_writer_id: str = None,
     ):
         FLComponent.__init__(self)
-        self.early_stopping_rounds = early_stopping_rounds
-        self.xgb_params = xgb_params
-        self.verbose_eval = verbose_eval
-        self.use_gpus = use_gpus
         self.model_file_name = model_file_name
         self.data_loader_id = data_loader_id
         self.logger = get_logger(self)
@@ -53,6 +49,9 @@ def __init__(
         self._rank = None
         self._world_size = None
         self._num_rounds = None
+        self._training_mode = None
+        self._xgb_params = None
+        self._xgb_options = None
         self._server_addr = None
         self._data_loader = None
         self._tb_dir = None
@@ -72,11 +71,15 @@ def initialize(self, fl_ctx: FLContext):
             if not isinstance(self._metrics_writer, LogWriter):
                 self.system_panic("writer should be type LogWriter", fl_ctx)

-    def _xgb_train(self, params: XGBoostParams, train_data: xgb.DMatrix, val_data) -> xgb.core.Booster:
+    def _xgb_train(self, num_rounds, xgb_params: dict, xgb_options: dict, train_data, val_data) -> xgb.core.Booster:
         """XGBoost training logic.

         Args:
-            params (XGBoostParams): xgboost parameters.
+            num_rounds: number of training rounds
+            xgb_params: the Booster parameters for the XGBoost train method
+            xgb_options: other arguments needed by XGBoost
+            train_data: training data
+            val_data: validation data

         Returns:
             A xgboost booster.
@@ -92,14 +95,17 @@ def _xgb_train(self, params: XGBoostParams, train_data: xgb.DMatrix, val_data) -
         if flag and self._tb_dir:
             callbacks.append(TensorBoardCallback(self._tb_dir, tensorboard))

+        early_stopping_rounds = xgb_options.get("early_stopping_rounds", 0)
+        verbose_eval = xgb_options.get("verbose_eval", False)
+
         # Run training, all the features in training API is available.
         bst = xgb.train(
-            params.xgb_params,
+            xgb_params,
             train_data,
-            params.num_rounds,
+            num_rounds,
             evals=watchlist,
-            early_stopping_rounds=params.early_stopping_rounds,
-            verbose_eval=params.verbose_eval,
+            early_stopping_rounds=early_stopping_rounds,
+            verbose_eval=verbose_eval,
             callbacks=callbacks,
         )
         return bst
@@ -109,40 +115,74 @@ def run(self, ctx: dict):
         self._rank = ctx.get(Constant.RUNNER_CTX_RANK)
         self._world_size = ctx.get(Constant.RUNNER_CTX_WORLD_SIZE)
         self._num_rounds = ctx.get(Constant.RUNNER_CTX_NUM_ROUNDS)
+        self._training_mode = ctx.get(Constant.RUNNER_CTX_TRAINING_MODE)
+        self._xgb_params = ctx.get(Constant.RUNNER_CTX_XGB_PARAMS)
+        self._xgb_options = ctx.get(Constant.RUNNER_CTX_XGB_OPTIONS)
         self._server_addr = ctx.get(Constant.RUNNER_CTX_SERVER_ADDR)
         # self._data_loader = ctx.get(Constant.RUNNER_CTX_DATA_LOADER)
         self._tb_dir = ctx.get(Constant.RUNNER_CTX_TB_DIR)
         self._model_dir = ctx.get(Constant.RUNNER_CTX_MODEL_DIR)

-        if self.use_gpus:
+        use_gpus = self._xgb_options.get("use_gpus", False)
+        if use_gpus:
             # mapping each rank to a GPU (can set to cuda:0 if simulating with only one gpu)
             self.logger.info(f"Training with GPU {self._rank}")
-            self.xgb_params["device"] = f"cuda:{self._rank}"
-
-        self.logger.info(f"Using xgb params: {self.xgb_params}")
-        params = XGBoostParams(
-            xgb_params=self.xgb_params,
-            num_rounds=self._num_rounds,
-            early_stopping_rounds=self.early_stopping_rounds,
-            verbose_eval=self.verbose_eval,
-        )
+            self._xgb_params["device"] = f"cuda:{self._rank}"
+        self.logger.info(
+            f"XGB training_mode: {self._training_mode} " f"params: {self._xgb_params} XGB options: {self._xgb_options}"
+        )
         self.logger.info(f"server address is {self._server_addr}")
+
         communicator_env = {
-            "dmlc_communicator": "federated",
+            "xgboost_communicator": "federated",
             "federated_server_address": f"{self._server_addr}",
             "federated_world_size": self._world_size,
             "federated_rank": self._rank,
-            # FIXME: It should be possible to customize this or find a better location
-            # to distribut the shared object, preferably along side the nvflare Python
-            # package.
- "federated_plugin": {"path": "/tmp/libproc_nvflare.so"}, } + + if self._training_mode not in SECURE_TRAINING_MODES: + self.logger.info("XGBoost non-secure training") + else: + xgb_plugin_name = ConfigService.get_str_var( + name="xgb_plugin_name", conf=SystemConfigs.RESOURCES_CONF, default="nvflare" + ) + + xgb_loader_params = ConfigService.get_dict_var( + name="xgb_loader_params", conf=SystemConfigs.RESOURCES_CONF, default={} + ) + + # Library path is frequently used, add a scalar config var and overwrite what's in the dict + xgb_library_path = ConfigService.get_str_var(name="xgb_library_path", conf=SystemConfigs.RESOURCES_CONF) + if xgb_library_path: + xgb_loader_params[LOADER_PARAMS_LIBRARY_PATH] = xgb_library_path + + lib_path = xgb_loader_params.get(LOADER_PARAMS_LIBRARY_PATH, None) + if not lib_path: + xgb_loader_params[LOADER_PARAMS_LIBRARY_PATH] = str(get_package_root() / "libs") + + xgb_proc_params = ConfigService.get_dict_var( + name="xgb_proc_params", conf=SystemConfigs.RESOURCES_CONF, default={} + ) + + self.logger.info( + f"XGBoost secure mode: {self._training_mode} plugin_name: {xgb_plugin_name} " + f"proc_params: {xgb_proc_params} loader_params: {xgb_loader_params}" + ) + + communicator_env.update( + { + "plugin_name": xgb_plugin_name, + "proc_params": xgb_proc_params, + "loader_params": xgb_loader_params, + } + ) + with xgb.collective.CommunicatorContext(**communicator_env): # Load the data. Dmatrix must be created with column split mode in CommunicatorContext for vertical FL - train_data, val_data = self._data_loader.load_data(self._client_name) + train_data, val_data = self._data_loader.load_data(self._client_name, self._training_mode) - bst = self._xgb_train(params, train_data, val_data) + bst = self._xgb_train(self._num_rounds, self._xgb_params, self._xgb_options, train_data, val_data) # Save the model. bst.save_model(os.path.join(self._model_dir, self.model_file_name)) diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/runners/xgb_server_runner.py b/nvflare/app_opt/xgboost/histogram_based_v2/runners/xgb_server_runner.py index 4f7752faee..32e708c90e 100644 --- a/nvflare/app_opt/xgboost/histogram_based_v2/runners/xgb_server_runner.py +++ b/nvflare/app_opt/xgboost/histogram_based_v2/runners/xgb_server_runner.py @@ -30,7 +30,7 @@ def run(self, ctx: dict): xgb_federated.run_federated_server( port=self._port, - n_workers=self._world_size, + world_size=self._world_size, ) self._stopped = True diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/sec/partial_he/util.py b/nvflare/app_opt/xgboost/histogram_based_v2/sec/partial_he/util.py index 6051d1b336..25d70cd75c 100644 --- a/nvflare/app_opt/xgboost/histogram_based_v2/sec/partial_he/util.py +++ b/nvflare/app_opt/xgboost/histogram_based_v2/sec/partial_he/util.py @@ -16,8 +16,15 @@ from base64 import urlsafe_b64decode, urlsafe_b64encode from binascii import hexlify, unhexlify -import ipcl_python -from ipcl_python import PaillierEncryptedNumber as EncryptedNumber +# ipcl_python is not a required dependency. 
diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/runners/xgb_server_runner.py b/nvflare/app_opt/xgboost/histogram_based_v2/runners/xgb_server_runner.py
index 4f7752faee..32e708c90e 100644
--- a/nvflare/app_opt/xgboost/histogram_based_v2/runners/xgb_server_runner.py
+++ b/nvflare/app_opt/xgboost/histogram_based_v2/runners/xgb_server_runner.py
@@ -30,7 +30,7 @@ def run(self, ctx: dict):

         xgb_federated.run_federated_server(
             port=self._port,
-            n_workers=self._world_size,
+            world_size=self._world_size,
         )

         self._stopped = True
diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/sec/partial_he/util.py b/nvflare/app_opt/xgboost/histogram_based_v2/sec/partial_he/util.py
index 6051d1b336..25d70cd75c 100644
--- a/nvflare/app_opt/xgboost/histogram_based_v2/sec/partial_he/util.py
+++ b/nvflare/app_opt/xgboost/histogram_based_v2/sec/partial_he/util.py
@@ -16,8 +16,15 @@
 from base64 import urlsafe_b64decode, urlsafe_b64encode
 from binascii import hexlify, unhexlify

-import ipcl_python
-from ipcl_python import PaillierEncryptedNumber as EncryptedNumber
+# ipcl_python is not a required dependency. Its import error causes unit test failures, so make it optional
+try:
+    import ipcl_python
+    from ipcl_python import PaillierEncryptedNumber as EncryptedNumber
+    from ipcl_python.ipcl_python import BNUtils, ipclCipherText
+
+    ipcl_imported = True
+except Exception:
+    ipcl_imported = False

 SCALE_FACTOR = 10000000000000
 ENABLE_DJN = True
@@ -37,11 +44,11 @@ def create_pub_key(key, n_length=1024):

 def ciphertext_to_int(d):
     cifer = d.ciphertextBN()
-    return ipcl_python.BNUtils.BN2int(cifer[0])
+    return BNUtils.BN2int(cifer[0])


 def int_to_ciphertext(d, pubkey):
-    return ipcl_python.ipclCipherText(pubkey.pubkey, ipcl_python.BNUtils.int2BN(d))
+    return ipclCipherText(pubkey.pubkey, BNUtils.int2BN(d))


 def get_exponent(d):
diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/xgb_params.py b/nvflare/app_opt/xgboost/histogram_based_v2/xgb_params.py
deleted file mode 100644
index bf5d4f9b81..0000000000
--- a/nvflare/app_opt/xgboost/histogram_based_v2/xgb_params.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-class XGBoostParams:
-    def __init__(self, xgb_params: dict, num_rounds=10, early_stopping_rounds=2, verbose_eval=False):
-        """Container for all XGBoost parameters.
-
-        Args:
-            xgb_params: This dict is passed to `xgboost.train()` as the first argument `params`.
-                It contains all the Booster parameters.
-            Please refer to XGBoost documentation for details:
-            https://xgboost.readthedocs.io/en/stable/python/python_api.html#module-xgboost.training
-        """
-        self.num_rounds = num_rounds
-        self.early_stopping_rounds = early_stopping_rounds
-        self.verbose_eval = verbose_eval
-        self.xgb_params: dict = xgb_params if xgb_params else {}
diff --git a/nvflare/utils/cli_utils.py b/nvflare/utils/cli_utils.py
index 8fb72d9fab..25a705591b 100644
--- a/nvflare/utils/cli_utils.py
+++ b/nvflare/utils/cli_utils.py
@@ -28,6 +28,14 @@ def get_home_dir() -> Path:
     return Path.home()


+def get_package_root() -> Path:
+    """
+    Get the nvflare package root folder, e.g.
+    /usr/local/python/3.10/lib/python3.10/site-packages/nvflare
+    """
+    return pathlib.Path(__file__).parent.parent.absolute().resolve()
+
+
 def get_hidden_nvflare_config_path(hidden_nvflare_dir: str) -> str:
     """
     Get the path for the hidden nvflare configuration file.