From 7999327bcb5fedd2f4a7b529b98e4c04cbb008a4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yuan-Ting=20Hsieh=20=28=E8=AC=9D=E6=B2=85=E5=BB=B7=29?= Date: Mon, 29 Jul 2024 17:26:26 -0700 Subject: [PATCH 01/16] Fix cryptography encrypt error (#2732) --- nvflare/fuel/f3/cellnet/cell_cipher.py | 26 ++------------------------ nvflare/fuel/f3/cellnet/core_cell.py | 4 ++++ 2 files changed, 6 insertions(+), 24 deletions(-) diff --git a/nvflare/fuel/f3/cellnet/cell_cipher.py b/nvflare/fuel/f3/cellnet/cell_cipher.py index da8ddba23b..88866f81d8 100644 --- a/nvflare/fuel/f3/cellnet/cell_cipher.py +++ b/nvflare/fuel/f3/cellnet/cell_cipher.py @@ -89,7 +89,7 @@ def _verify(k, m, s): ) -def _sym_enc(k, n, m): +def _sym_enc(k: bytes, n: bytes, m: bytes): cipher = ciphers.Cipher(ciphers.algorithms.AES(k), ciphers.modes.CBC(n)) encryptor = cipher.encryptor() padder = padding.PKCS7(PADDING_LENGTH).padder() @@ -97,7 +97,7 @@ def _sym_enc(k, n, m): return encryptor.update(padded_data) + encryptor.finalize() -def _sym_dec(k, n, m): +def _sym_dec(k: bytes, n: bytes, m: bytes): cipher = ciphers.Cipher(ciphers.algorithms.AES(k), ciphers.modes.CBC(n)) decryptor = cipher.decryptor() plain_text = decryptor.update(m) @@ -157,28 +157,6 @@ def get_latest_key(self): return last_value -class CellCipher: - def __init__(self, session_key_manager: SessionKeyManager): - self.session_key_manager = session_key_manager - - def encrypt(self, message): - key = self.session_key_manager.get_latest_key() - key_hash = get_hash(key) - nonce = os.urandom(NONCE_LENGTH) - return nonce + key_hash[-HASH_LENGTH:] + _sym_enc(key, nonce, message) - - def decrypt(self, message): - nonce, key_hash, message = ( - message[:NONCE_LENGTH], - message[NONCE_LENGTH:HEADER_LENGTH], - message[HEADER_LENGTH:], - ) - key = self.session_key_manager.get_key(key_hash) - if key is None: - raise SessionKeyUnavailable("No session key found for received message") - return _sym_dec(key, nonce, message) - - class SimpleCellCipher: def __init__(self, root_ca: Certificate, pri_key: asymmetric.rsa.RSAPrivateKey, cert: Certificate): self._root_ca = root_ca diff --git a/nvflare/fuel/f3/cellnet/core_cell.py b/nvflare/fuel/f3/cellnet/core_cell.py index d7e821cbef..dbb2b8c10f 100644 --- a/nvflare/fuel/f3/cellnet/core_cell.py +++ b/nvflare/fuel/f3/cellnet/core_cell.py @@ -942,6 +942,10 @@ def encrypt_payload(self, message: Message): if message.payload is None: message.payload = bytes(0) + elif isinstance(message.payload, memoryview) or isinstance(message.payload, bytearray): + message.payload = bytes(message.payload) + elif not isinstance(message.payload, bytes): + raise RuntimeError(f"Payload type of {type(message.payload)} is not supported.") payload_len = len(message.payload) message.add_headers( From d669dca8d46460118bed96853eb54724688d2342 Mon Sep 17 00:00:00 2001 From: Yuhong Wen Date: Tue, 30 Jul 2024 15:34:59 -0400 Subject: [PATCH 02/16] keep the local resources for simulator (#2730) * keep the local resources for simulator. * fixed the local folder deploy. 
--------- Co-authored-by: Chester Chen <512707+chesterxgchen@users.noreply.github.com> --- .../private/fed/app/simulator/simulator_runner.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/nvflare/private/fed/app/simulator/simulator_runner.py b/nvflare/private/fed/app/simulator/simulator_runner.py index 3ccdcd1435..274ad2a889 100644 --- a/nvflare/private/fed/app/simulator/simulator_runner.py +++ b/nvflare/private/fed/app/simulator/simulator_runner.py @@ -149,7 +149,7 @@ def setup(self): for i in range(self.args.n_clients): self.client_names.append("site-" + str(i + 1)) - log_config_file_path = os.path.join(self.args.workspace, "startup", WorkspaceConstants.LOGGING_CONFIG) + log_config_file_path = os.path.join(self.args.workspace, "local", WorkspaceConstants.LOGGING_CONFIG) if not os.path.isfile(log_config_file_path): log_config_file_path = os.path.join(os.path.dirname(__file__), WorkspaceConstants.LOGGING_CONFIG) logging.config.fileConfig(fname=log_config_file_path, disable_existing_loggers=False) @@ -271,18 +271,29 @@ def _cleanup_workspace(self): with tempfile.TemporaryDirectory() as temp_dir: startup_dir = os.path.join(self.args.workspace, "startup") temp_start_up = os.path.join(temp_dir, "startup") + local_dir = os.path.join(self.args.workspace, "local") + temp_local_dir = os.path.join(temp_dir, "local") if os.path.exists(startup_dir): shutil.move(startup_dir, temp_start_up) + if os.path.exists(local_dir): + shutil.move(local_dir, temp_local_dir) + if os.path.exists(self.simulator_root): shutil.rmtree(self.simulator_root) + if os.path.exists(temp_start_up): shutil.move(temp_start_up, startup_dir) + if os.path.exists(temp_local_dir): + shutil.move(temp_local_dir, local_dir) def _setup_local_startup(self, log_config_file_path, workspace): local_dir = os.path.join(workspace, "local") startup = os.path.join(workspace, "startup") os.makedirs(local_dir, exist_ok=True) shutil.copyfile(log_config_file_path, os.path.join(local_dir, WorkspaceConstants.LOGGING_CONFIG)) + workspace_local = os.path.join(self.simulator_root, "local") + if os.path.exists(workspace_local): + shutil.copytree(workspace_local, local_dir, dirs_exist_ok=True) shutil.copytree(os.path.join(self.simulator_root, "startup"), startup) def validate_job_data(self): From 1ef5207237288f303429421e4dfb54040d81a6ae Mon Sep 17 00:00:00 2001 From: Holger Roth <6304754+holgerroth@users.noreply.github.com> Date: Tue, 30 Jul 2024 16:15:56 -0400 Subject: [PATCH 03/16] Support same app for all sites in Job API (#2714) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * support same app to all * add to_server() and to_clients() routines * comment out export * improve input errors handling * check for missing server components * address comments --------- Co-authored-by: Yuan-Ting Hsieh (謝沅廷) --- .../pt/fedavg_script_executor_cifar10_all.py | 43 ++++ nvflare/job_config/fed_job.py | 186 ++++++++++++------ nvflare/job_config/fed_job_config.py | 9 + 3 files changed, 183 insertions(+), 55 deletions(-) create mode 100644 examples/getting_started/pt/fedavg_script_executor_cifar10_all.py diff --git a/examples/getting_started/pt/fedavg_script_executor_cifar10_all.py b/examples/getting_started/pt/fedavg_script_executor_cifar10_all.py new file mode 100644 index 0000000000..af15043785 --- /dev/null +++ b/examples/getting_started/pt/fedavg_script_executor_cifar10_all.py @@ -0,0 +1,43 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from src.net import Net + +from nvflare import FedAvg, FedJob, ScriptExecutor + +if __name__ == "__main__": + n_clients = 2 + num_rounds = 2 + train_script = "src/cifar10_fl.py" + + job = FedJob(name="cifar10_fedavg") + + # Define the controller workflow and send to server + controller = FedAvg( + num_clients=n_clients, + num_rounds=num_rounds, + ) + job.to_server(controller) + + # Define the initial global model and send to server + job.to_server(Net()) + + # Send executor to all clients + executor = ScriptExecutor( + task_script_path=train_script, task_script_args="" # f"--batch_size 32 --data_path /tmp/data/site-{i}" + ) + job.to_clients(executor) + + # job.export_job("/tmp/nvflare/jobs/job_config") + job.simulator_run("/tmp/nvflare/jobs/workdir", n_clients=n_clients) diff --git a/nvflare/job_config/fed_job.py b/nvflare/job_config/fed_job.py index e006729fe8..bd72a8fce6 100644 --- a/nvflare/job_config/fed_job.py +++ b/nvflare/job_config/fed_job.py @@ -19,12 +19,14 @@ from nvflare.apis.executor import Executor from nvflare.apis.filter import Filter from nvflare.apis.impl.controller import Controller +from nvflare.apis.job_def import ALL_SITES, SERVER_SITE_NAME from nvflare.app_common.executors.script_executor import ScriptExecutor from nvflare.app_common.widgets.convert_to_fed_event import ConvertToFedEvent from nvflare.app_common.widgets.intime_model_selector import IntimeModelSelector from nvflare.app_common.widgets.validation_json_generator import ValidationJsonGenerator from nvflare.fuel.utils.class_utils import get_component_init_parameters from nvflare.fuel.utils.import_utils import optional_import +from nvflare.fuel.utils.validation_utils import check_positive_int from nvflare.job_config.fed_app_config import ClientAppConfig, FedAppConfig, ServerAppConfig from nvflare.job_config.fed_job_config import FedJobConfig @@ -103,6 +105,56 @@ def add_external_scripts(self, external_scripts: List): self.app.add_ext_script(_script) +class ExecutorApp(FedApp): + def __init__(self): + """Wrapper around `ClientAppConfig`.""" + super().__init__() + self._create_client_app() + + def add_executor(self, executor, tasks=None): + if tasks is None: + tasks = ["*"] # Add executor for any task by default + self.app.add_executor(tasks, executor) + + def _create_client_app(self): + self.app = ClientAppConfig() + + component = ConvertToFedEvent(events_to_convert=["analytix_log_stats"], fed_event_prefix="fed.") + self.app.add_component("event_to_fed", component) + + +class ControllerApp(FedApp): + """Wrapper around `ServerAppConfig`. 
+ + Args: + """ + + def __init__(self, key_metric="accuracy"): + super().__init__() + self.key_metric = key_metric + self._create_server_app() + + def add_controller(self, controller, id=None): + if id is None: + id = "controller" + self.app.add_workflow(self._gen_tracked_id(id), controller) + + def _create_server_app(self): + self.app: ServerAppConfig = ServerAppConfig() + + component = ValidationJsonGenerator() + self.app.add_component("json_generator", component) + + if self.key_metric: + component = IntimeModelSelector(key_metric=self.key_metric) + self.app.add_component("model_selector", component) + + # TODO: make different tracking receivers configurable + if torch_ok and tb_ok: + component = TBAnalyticsReceiver(events=["fed.analytix_log_stats"]) + self.app.add_component("receiver", component) + + class FedJob: def __init__(self, name="fed_job", min_clients=1, mandatory_clients=None, key_metric="accuracy") -> None: """FedJob allows users to generate job configurations in a Pythonic way. @@ -136,7 +188,7 @@ def to( filter_type: FilterType = None, id=None, ): - """assign an `obj` to a target (server or clients). + """assign an object to a target (server or clients). Args: obj: The object to be assigned. The obj will be given a default `id` if non is provided based on its type. @@ -218,6 +270,51 @@ def to( if self._components: self._add_referenced_components(obj, target) + def to_server( + self, + obj: Any, + filter_type: FilterType = None, + id=None, + ): + """assign an object to the server. + + Args: + obj: The object to be assigned. The obj will be given a default `id` if non is provided based on its type. + filter_type: The type of filter used. Either `FilterType.TASK_RESULT` or `FilterType.TASK_DATA`. + id: Optional user-defined id for the object. Defaults to `None` and ID will automatically be assigned. + + Returns: + + """ + if isinstance(obj, Executor): + raise ValueError("Use `job.to(executor, )` or `job.to_clients(executor)` for Executors.") + + self.to(obj=obj, target=SERVER_SITE_NAME, filter_type=filter_type, id=id) + + def to_clients( + self, + obj: Any, + tasks: List[str] = None, + filter_type: FilterType = None, + id=None, + ): + """assign an object to all clients. + + Args: + obj (Any): Object to be deployed. + tasks: In case object is an `Executor`, optional list of tasks the executor should handle. + Defaults to `None`. If `None`, all tasks will be handled using `[*]`. + filter_type: The type of filter used. Either `FilterType.TASK_RESULT` or `FilterType.TASK_DATA`. + id: Optional user-defined id for the object. Defaults to `None` and ID will automatically be assigned. 
+ + Returns: + + """ + if isinstance(obj, Controller): + raise ValueError('Use `job.to(controller, "server")` or `job.to_server(controller)` for Controllers.') + + self.to(obj=obj, target=ALL_SITES, tasks=tasks, filter_type=filter_type, id=id) + def as_id(self, obj: Any): id = str(uuid.uuid4()) self._components[id] = obj @@ -260,10 +357,30 @@ def _set_site_app(self, app: FedApp, target: str): self.job.add_fed_app(app_name, app_config) self.job.set_site_app(target, app_name) + def _set_all_app(self, client_app: ExecutorApp, server_app: ControllerApp): + if not isinstance(client_app, ExecutorApp): + raise ValueError(f"`client_app` needs to be of type `ExecutorApp` but was type {type(client_app)}") + if not isinstance(server_app, ControllerApp): + raise ValueError(f"`server_app` needs to be of type `ControllerApp` but was type {type(server_app)}") + + client_config = client_app.get_app_config() + server_config = server_app.get_app_config() + + app_config = FedAppConfig(server_app=server_config, client_app=client_config) + app_name = "app" + + self.job.add_fed_app(app_name, app_config) + self.job.set_site_app(ALL_SITES, app_name) + def _set_all_apps(self): if not self._deployed: - for target in self._deploy_map: - self._set_site_app(self._deploy_map[target], target) + if ALL_SITES in self._deploy_map: + if SERVER_SITE_NAME not in self._deploy_map: + raise ValueError('Missing server components! Deploy using `to(obj, "server") or `to_server(obj)`') + self._set_all_app(client_app=self._deploy_map[ALL_SITES], server_app=self._deploy_map[SERVER_SITE_NAME]) + else: + for target in self._deploy_map: + self._set_site_app(self._deploy_map[target], target) self._deployed = True @@ -271,10 +388,19 @@ def export_job(self, job_root): self._set_all_apps() self.job.generate_job_config(job_root) - def simulator_run(self, workspace, threads: int = None): + def simulator_run(self, workspace, n_clients: int = None, threads: int = None): self._set_all_apps() + if ALL_SITES in self.clients and not n_clients: + raise ValueError("Clients were not specified using to(). Please provide the number of clients to simulate.") + elif ALL_SITES in self.clients and n_clients: + check_positive_int("n_clients", n_clients) + self.clients = [f"site-{i}" for i in range(1, n_clients + 1)] + elif self.clients and n_clients: + raise ValueError("You already specified clients using `to()`. Don't use `n_clients` in simulator_run.") + n_clients = len(self.clients) + if threads is None: threads = n_clients @@ -290,56 +416,6 @@ def _validate_target(self, target): if not target: raise ValueError("Must provide a valid target name") - if any(c in SPECIAL_CHARACTERS for c in target): + if any(c in SPECIAL_CHARACTERS for c in target) and target != ALL_SITES: raise ValueError(f"target {target} name contains invalid character") pass - - -class ExecutorApp(FedApp): - def __init__(self): - """Wrapper around `ClientAppConfig`.""" - super().__init__() - self._create_client_app() - - def add_executor(self, executor, tasks=None): - if tasks is None: - tasks = ["*"] # Add executor for any task by default - self.app.add_executor(tasks, executor) - - def _create_client_app(self): - self.app = ClientAppConfig() - - component = ConvertToFedEvent(events_to_convert=["analytix_log_stats"], fed_event_prefix="fed.") - self.app.add_component("event_to_fed", component) - - -class ControllerApp(FedApp): - """Wrapper around `ServerAppConfig`. 
- - Args: - """ - - def __init__(self, key_metric="accuracy"): - super().__init__() - self.key_metric = key_metric - self._create_server_app() - - def add_controller(self, controller, id=None): - if id is None: - id = "controller" - self.app.add_workflow(self._gen_tracked_id(id), controller) - - def _create_server_app(self): - self.app: ServerAppConfig = ServerAppConfig() - - component = ValidationJsonGenerator() - self.app.add_component("json_generator", component) - - if self.key_metric: - component = IntimeModelSelector(key_metric=self.key_metric) - self.app.add_component("model_selector", component) - - # TODO: make different tracking receivers configurable - if torch_ok and tb_ok: - component = TBAnalyticsReceiver(events=["fed.analytix_log_stats"]) - self.app.add_component("receiver", component) diff --git a/nvflare/job_config/fed_job_config.py b/nvflare/job_config/fed_job_config.py index 509e35d382..f98bc9ce9f 100644 --- a/nvflare/job_config/fed_job_config.py +++ b/nvflare/job_config/fed_job_config.py @@ -65,6 +65,15 @@ def add_fed_app(self, app_name: str, fed_app: FedAppConfig): self.fed_apps[app_name] = fed_app def set_site_app(self, site_name: str, app_name: str): + """assign an app to a certain site. + + Args: + site_name: The target site name. + app_name: The app name. + + Returns: + + """ if app_name not in self.fed_apps.keys(): raise RuntimeError(f"fed_app {app_name} does not exist.") From 00642aa4ed41a2e281b5e714343c3686d2fa6b9c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yuan-Ting=20Hsieh=20=28=E8=AC=9D=E6=B2=85=E5=BB=B7=29?= Date: Tue, 30 Jul 2024 14:11:59 -0700 Subject: [PATCH 04/16] Fix overseer test timing (#2743) --- tests/integration_test/overseer_test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration_test/overseer_test.py b/tests/integration_test/overseer_test.py index cd9671bd4b..70dc2c9c9e 100644 --- a/tests/integration_test/overseer_test.py +++ b/tests/integration_test/overseer_test.py @@ -83,7 +83,7 @@ def test_overseer_overseer_down_and_up(self): oa_launcher.stop_overseer() time.sleep(10) oa_launcher.start_overseer() - time.sleep(10) + time.sleep(20) for client_agent in client_agent_list: psp = oa_launcher.get_primary_sp(client_agent) assert psp.name == "server00" From aa04a12c385be730a396a7b06b0f68a10a260484 Mon Sep 17 00:00:00 2001 From: Sean Yang Date: Tue, 30 Jul 2024 14:36:38 -0700 Subject: [PATCH 05/16] Add ModelController documentation (#2707) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * add ModelController docs * address comments * address comments 2 * fix code block --------- Co-authored-by: Yuan-Ting Hsieh (謝沅廷) --- .../controllers/model_controller.rst | 225 ++++++++++++++++++ docs/programming_guide/fl_model.rst | 4 +- .../workflows_and_controllers.rst | 37 ++- .../workflows/base_model_controller.py | 6 +- .../app_common/workflows/model_controller.py | 8 +- 5 files changed, 269 insertions(+), 11 deletions(-) create mode 100644 docs/programming_guide/controllers/model_controller.rst diff --git a/docs/programming_guide/controllers/model_controller.rst b/docs/programming_guide/controllers/model_controller.rst new file mode 100644 index 0000000000..26215dad90 --- /dev/null +++ b/docs/programming_guide/controllers/model_controller.rst @@ -0,0 +1,225 @@ +.. _model_controller: + +################### +ModelController API +################### + +The FLARE :mod:`ModelController` API provides an easy way for users to write and customize FLModel-based controller workflows. 
+ +* Highly flexible with a simple API (run routine and basic communication and utility functions) +* :ref:`fl_model`for the communication data structure, everything else is pure Python +* Option to support pre-existing components and FLARE-specific functionalities + +.. note:: + + The ModelController API is a high-level API meant to simplify writing workflows. + If users prefer or need the full flexibility of the Controller with all the capabilites of FLARE functions, refer to the :ref:`controllers`. + + +Core Concepts +============= + +As an example, we can take a look at the popular federated learning workflow, "FedAvg" which has the following steps: + +#. FL server initializes an initial model +#. For each round (global iteration): + + #. FL server sends the global model to clients + #. Each FL client starts with this global model and trains on their own data + #. Each FL client sends back their trained model + #. FL server aggregates all the models and produces a new global model + + +To implement this workflow using the ModelController there are a few essential parts: + +* Import and subclass the :class:`nvflare.app_common.workflows.model_controller.ModelController`. +* Implement the ``run()`` routine for the workflow logic. +* Utilize ``send_model()`` / ``send_model_and_wait()`` for communication to send tasks with FLModel to target clients, and receive FLModel results. +* Customize workflow using predefined utility functions and components, or implement your own logics. + + +Here is an example of the FedAvg workflow using the :class:`BaseFedAvg` base class: + +.. code-block:: python + + # BaseFedAvg subclasses ModelController and defines common functions and variables such as aggregate(), update_model(), self.start_round, self.num_rounds + class FedAvg(BaseFedAvg): + + # run routine that user must implement + def run(self) -> None: + self.info("Start FedAvg.") + + # load model (by default uses persistor, can provide custom method) + model = self.load_model() + model.start_round = self.start_round + model.total_rounds = self.num_rounds + + # for each round (global iteration) + for self.current_round in range(self.start_round, self.start_round + self.num_rounds): + self.info(f"Round {self.current_round} started.") + model.current_round = self.current_round + + # obtain self.num_clients clients + clients = self.sample_clients(self.num_clients) + + # send model to target clients with default train task, wait to receive results + results = self.send_model_and_wait(targets=clients, data=model) + + # use BaseFedAvg aggregate function + aggregate_results = self.aggregate( + results, aggregate_fn=self.aggregate_fn + ) # using default aggregate_fn with `WeightedAggregationHelper`. Can overwrite self.aggregate_fn with signature Callable[List[FLModel], FLModel] + + # update global model with agggregation results + model = self.update_model(model, aggregate_results) + + # save model (by default uses persistor, can provide custom method) + self.save_model(model) + + self.info("Finished FedAvg.") + + +Below is a comprehensive table overview of the :class:`ModelController` API: + + +.. list-table:: ModelController API + :widths: 25 35 50 + :header-rows: 1 + + * - API + - Description + - API Doc Link + * - run + - Run routine for workflow. + - :func:`run` + * - send_model_and_wait + - Send a task with data to targets (blocking) and wait for results.. + - :func:`send_model_and_wait` + * - send_model + - Send a task with data to targets (non-blocking) with callback. 
+ - :func:`send_model` + * - sample_clients + - Returns a list of num_clients clients. + - :func:`sample_clients` + * - save_model + - Save model with persistor. + - :func:`save_model` + * - load_model + - Load model from persistor. + - :func:`load_model` + + +Communication +============= + +The ModelController uses a task based communication where tasks are sent to targets, and targets execute the tasks and return results. +The :ref:`fl_model` is standardized data structure object that is sent along with each task, and :ref:`fl_model` responses are received for the results. + +.. note:: + + The :ref:`fl_model` object can be any type of data depending on the specific task. + For example, in the "train" and "validate" tasks we send the model parameters along with the task so the target clients can train and validate the model. + However in many other tasks that do not involve sending the model (e.g. "submit_model"), the :ref:`fl_model` can contain any type of data (e.g. metadata, metrics etc.) or may be not be needed at all. + + +send_model_and_wait +------------------- +:func:`send_model_and_wait` is the core communication function which enables users to send tasks to targets, and wait for responses. + +The ``data`` is an :ref:`fl_model` object, and the ``task_name`` is the task for the target executors to execute (Client API executors by default support "train", "validate", and "submit_model", however executors can be written for any arbitrary task name). + +``targets`` can be chosen from client names obtained with ``sample_clients()``. + +Returns the :ref:`fl_model` responses from the target clients once the task is completed (``min_responses`` have been received, or ``timeout`` time has passed). + +send_model +---------- +:func:`send_model` is the non-blocking version of +:func:`send_model_and_wait` with a user-defined callback when receiving responses. + +A callback with the signature ``Callable[[FLModel], None]`` can be passed in, which will be called when a response is received from each target. + +The task is standing until either ``min_responses`` have been received, or ``timeout`` time has passed. +Since this call is asynchronous, the Controller :func:`get_num_standing_tasks` method can be used to get the number of standing tasks for synchronization purposes. + + +Saving & Loading +================ + +persistor +--------- +The :func:`save_model` and :func:`load_model` +functions utilize the configured :class:`ModelPersistor` set in the ModelController ``persistor_id: str = "persistor"`` init argument. + +custom save & load +------------------ +Users can also choose to instead create their own custom save and load functions rather than use a persistor. + +For example we can use PyTorch's save and load functions for the model parameters, and save the FLModel metadata with :mod:`FOBS` separately to different filepaths. + +.. code-block:: python + + import torch + from nvflare.fuel.utils import fobs + + class MyController(ModelController): + ... 
+ def save_model(self, model, filepath=""): + params = model.params + # PyTorch save + torch.save(params, filepath) + + # save FLModel metadata + model.params = {} + fobs.dumpf(model, filepath + ".metadata") + model.params = params + + def load_model(self, filepath=""): + # PyTorch load + params = torch.load(filepath) + + # load FLModel metadata + model = fobs.loadf(filepath + ".metadata") + model.params = params + return model + + +Note: for non-primitive data types such as ``torch.nn.Module`` (used for the initial PyTorch model), we must configure a corresponding FOBS decomposer for serialization and deserialization. +Read more at :github_nvflare_link:`Flare Object Serializer (FOBS) `. + +.. code-block:: python + + from nvflare.app_opt.pt.decomposers import TensorDecomposer + + fobs.register(TensorDecomposer) + + +Additional Functionalities +========================== + +In some cases, more advanced FLARE-specific functionalities may be of use. + +The :mod:`BaseModelController` class provides access to the engine ``self.engine`` and FLContext ``self.fl_ctx`` if needed. +Functions such as ``get_component()`` and ``build_component()`` can be used to load or dynamically build components. + +Furthermore, the underlying :mod:`Controller` class offers additional communication functions and task related utilities. +Many of our pre-existing workflows are based on this lower-level Controller API. +For more details refer to the :ref:`controllers` section. + +Examples +======== + +Examples of basic workflows using the ModelController API: + +* :github_nvflare_link:`Cyclic ` +* :github_nvflare_link:`BaseFedAvg ` +* :github_nvflare_link:`FedAvg ` + +Advanced examples: + +* :github_nvflare_link:`Scaffold ` +* :github_nvflare_link:`FedOpt ` +* :github_nvflare_link:`PTFedAvgEarlyStopping ` +* :github_nvflare_link:`Kaplan-Meier ` +* :github_nvflare_link:`Logistic Regression Newton Raphson ` +* :github_nvflare_link:`FedBPT ` diff --git a/docs/programming_guide/fl_model.rst b/docs/programming_guide/fl_model.rst index 6b2a9bad07..702af3a4de 100644 --- a/docs/programming_guide/fl_model.rst +++ b/docs/programming_guide/fl_model.rst @@ -3,7 +3,7 @@ FLModel ======= -We define a standard data structure :mod:`FLModel` +We define a standard data structure :mod:`FLModel` that captures the common attributes needed for exchanging learning results. This is particularly useful when NVFlare system needs to exchange learning @@ -14,4 +14,4 @@ information from received FLModel, run local training, and put the results in a new FLModel to be sent back. For a detailed explanation of each attributes, please refer to the API doc: -:mod:`FLModel` +:mod:`FLModel` diff --git a/docs/programming_guide/workflows_and_controllers.rst b/docs/programming_guide/workflows_and_controllers.rst index 9a75c9901d..8b8bd6ce24 100644 --- a/docs/programming_guide/workflows_and_controllers.rst +++ b/docs/programming_guide/workflows_and_controllers.rst @@ -7,16 +7,49 @@ A workflow has one or more controllers, each implementing a specific coordinatio CrossSiteValidation controller implements a strategy to let every client site evaluate every other site's model. You can put together a workflow that uses any number of controllers. -We have implemented several server controlled federated learning workflows (fed-average, cyclic controller, cross-site evaluation) with the server-side :ref:`controllers `. +We provide the FLModel-based :ref:`model_controller` which provides a straightforward way for users to write controllers. 
+We also have the original :ref:`Controller API ` with more FLARE-specific functionalities, which many of our existing workflows are based upon. + +We have implemented several server controlled federated learning workflows (fed-average, cyclic controller, cross-site evaluation) with the server-side controllers. In these workflows, FL clients get tasks assigned by the controller, execute the tasks, and submit results back to the server. In certain cases, if the server cannot be trusted, it should not be involved in communication with sensitive information. To address this concern, NVFlare introduces Client Controlled Workflows (CCWF) to facilitate peer-to-peer communication among clients. -Please refer to the following sections for more details. + +Controllers can be configured in ``config_fed_server.json`` in the workflows section: + +.. code-block:: json + + workflows = [ + { + id = "fedavg_ctl", + name = "FedAvg", + args { + min_clients = 2, + num_rounds = 3, + persistor_id = "persistor" + } + } + ] + +To configure controllers using the JobAPI, define the controller and send it to the server. +This code will automatically generate the server configuration for the controller: + +.. code-block:: python + + controller = FedAvg( + num_clients=2, + num_rounds=3, + persistor_id = "persistor" + ) + job.to(controller, "server") + +Please refer to the following sections for more details about the different types of controllers. .. toctree:: :maxdepth: 3 + controllers/model_controller controllers/controllers controllers/client_controlled_workflows diff --git a/nvflare/app_common/workflows/base_model_controller.py b/nvflare/app_common/workflows/base_model_controller.py index 6ffa7436f1..50aefa62db 100644 --- a/nvflare/app_common/workflows/base_model_controller.py +++ b/nvflare/app_common/workflows/base_model_controller.py @@ -359,8 +359,8 @@ def save_model(self, model): else: self.error("persistor not configured, model will not be saved") - def sample_clients(self, num_clients=None): - clients = self.engine.get_clients() + def sample_clients(self, num_clients: int = None) -> List[str]: + clients = [client.name for client in self.engine.get_clients()] if num_clients: check_positive_int("num_clients", num_clients) @@ -375,7 +375,7 @@ def sample_clients(self, num_clients=None): f"num_clients ({num_clients}) is greater than the number of available clients. Returning all ({len(clients)}) available clients." ) - self.info(f"Sampled clients: {[client.name for client in clients]}") + self.info(f"Sampled clients: {clients}") return clients diff --git a/nvflare/app_common/workflows/model_controller.py b/nvflare/app_common/workflows/model_controller.py index bd46595d14..1c2570919e 100644 --- a/nvflare/app_common/workflows/model_controller.py +++ b/nvflare/app_common/workflows/model_controller.py @@ -103,7 +103,7 @@ def send_model( callback=callback, ) - def load_model(self): + def load_model(self) -> FLModel: """Load initial model from persistor. If persistor is not configured, returns empty FLModel. Returns: @@ -111,7 +111,7 @@ def load_model(self): """ return super().load_model() - def save_model(self, model: FLModel): + def save_model(self, model: FLModel) -> None: """Saves model with persistor. If persistor is not configured, does not save. Args: @@ -122,12 +122,12 @@ def save_model(self, model: FLModel): """ super().save_model(model) - def sample_clients(self, num_clients=None): + def sample_clients(self, num_clients: int = None) -> List[str]: """Returns a list of `num_clients` clients. 
Args: num_clients: number of clients to return. If None or > number available clients, returns all available clients. Defaults to None. - Returns: list of clients. + Returns: list of clients names. """ return super().sample_clients(num_clients) From 430b7d40189559815c9d68d389ec299c7d242239 Mon Sep 17 00:00:00 2001 From: Yan Cheng <58191769+yanchengnv@users.noreply.github.com> Date: Tue, 30 Jul 2024 20:04:03 -0400 Subject: [PATCH 06/16] [2.5] TIE (Technology for Integrating Everything) and Flower Inegration (#2523) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * added TIE * add license text * fix fstr * support cli applet * add tli applet * develop flower integration * added license text * generate cli cmd by applet * integrate with flower * fix format * fix fl ctx * fix get_command * run hello-flwr-pt job (#7) * run hello-flwr-pt job * remove print outs * abort grpc gracefully * fix example * graceful shutdown of flower * fix msg release * fix formatting * fix formatting * fix formatting * check applet stop * update flwr server commands (#8) * test superlink ready before starting server app * improve log file handling * remove unused import * fixed _superlink_process var bug * change namespace for flower proto; log flower msgs to file and console * add license text * consolidate process mgr * improve docstrings * address pr review issues * address additional pr comments * changed to use flwr proto directly * use PyApplet for running py code * added PyApplet * support server app args; address pr issues * move ccreate_channel to grpc_utils * fix flower output formatting * reformat --------- Co-authored-by: Holger Roth <6304754+holgerroth@users.noreply.github.com> Co-authored-by: Chester Chen <512707+chesterxgchen@users.noreply.github.com> Co-authored-by: Yuan-Ting Hsieh (謝沅廷) --- examples/hello-world/hello-flower/README.md | 28 + .../app/config/config_fed_client.json | 17 + .../app/config/config_fed_server.json | 16 + .../jobs/hello-flwr-pt/app/custom/client.py | 53 ++ .../jobs/hello-flwr-pt/app/custom/server.py | 75 +++ .../jobs/hello-flwr-pt/app/custom/task.py | 106 ++++ .../hello-flower/jobs/hello-flwr-pt/meta.json | 10 + .../hello-world/hello-flower/requirements.txt | 4 + nvflare/apis/fl_constant.py | 2 +- nvflare/apis/utils/reliable_message.py | 35 +- nvflare/app_common/tie/__init__.py | 13 + nvflare/app_common/tie/applet.py | 70 +++ nvflare/app_common/tie/cli_applet.py | 109 ++++ nvflare/app_common/tie/connector.py | 264 ++++++++ nvflare/app_common/tie/controller.py | 565 ++++++++++++++++++ nvflare/app_common/tie/defs.py | 43 ++ nvflare/app_common/tie/executor.py | 197 ++++++ nvflare/app_common/tie/process_mgr.py | 209 +++++++ nvflare/app_common/tie/py_applet.py | 240 ++++++++ nvflare/app_opt/flower/__init__.py | 13 + nvflare/app_opt/flower/applet.py | 261 ++++++++ nvflare/app_opt/flower/connectors/__init__.py | 13 + .../flower/connectors/flower_connector.py | 144 +++++ .../connectors/grpc_client_connector.py | 139 +++++ .../connectors/grpc_server_connector.py | 109 ++++ nvflare/app_opt/flower/controller.py | 103 ++++ nvflare/app_opt/flower/defs.py | 52 ++ nvflare/app_opt/flower/executor.py | 59 ++ nvflare/app_opt/flower/grpc_client.py | 100 ++++ nvflare/app_opt/flower/grpc_server.py | 82 +++ nvflare/app_opt/flower/mock/__init__.py | 13 + nvflare/app_opt/flower/mock/applet.py | 76 +++ nvflare/app_opt/flower/mock/controller.py | 25 + nvflare/app_opt/flower/mock/echo_servicer.py | 49 ++ nvflare/app_opt/flower/mock/executor.py | 34 ++ 
nvflare/app_opt/flower/mock/flower_client.py | 108 ++++ nvflare/app_opt/flower/mock/flower_server.py | 48 ++ nvflare/app_opt/flower/utils.py | 68 +++ nvflare/fuel/utils/grpc_utils.py | 42 ++ nvflare/private/aux_runner.py | 2 +- .../private/fed/client/client_app_runner.py | 6 +- .../private/fed/client/client_json_config.py | 6 +- .../private/fed/server/server_app_runner.py | 6 +- .../private/fed/server/server_json_config.py | 6 +- 44 files changed, 3610 insertions(+), 10 deletions(-) create mode 100644 examples/hello-world/hello-flower/README.md create mode 100644 examples/hello-world/hello-flower/jobs/hello-flwr-pt/app/config/config_fed_client.json create mode 100644 examples/hello-world/hello-flower/jobs/hello-flwr-pt/app/config/config_fed_server.json create mode 100644 examples/hello-world/hello-flower/jobs/hello-flwr-pt/app/custom/client.py create mode 100644 examples/hello-world/hello-flower/jobs/hello-flwr-pt/app/custom/server.py create mode 100644 examples/hello-world/hello-flower/jobs/hello-flwr-pt/app/custom/task.py create mode 100644 examples/hello-world/hello-flower/jobs/hello-flwr-pt/meta.json create mode 100644 examples/hello-world/hello-flower/requirements.txt create mode 100644 nvflare/app_common/tie/__init__.py create mode 100644 nvflare/app_common/tie/applet.py create mode 100644 nvflare/app_common/tie/cli_applet.py create mode 100644 nvflare/app_common/tie/connector.py create mode 100644 nvflare/app_common/tie/controller.py create mode 100644 nvflare/app_common/tie/defs.py create mode 100644 nvflare/app_common/tie/executor.py create mode 100644 nvflare/app_common/tie/process_mgr.py create mode 100644 nvflare/app_common/tie/py_applet.py create mode 100644 nvflare/app_opt/flower/__init__.py create mode 100644 nvflare/app_opt/flower/applet.py create mode 100644 nvflare/app_opt/flower/connectors/__init__.py create mode 100644 nvflare/app_opt/flower/connectors/flower_connector.py create mode 100644 nvflare/app_opt/flower/connectors/grpc_client_connector.py create mode 100644 nvflare/app_opt/flower/connectors/grpc_server_connector.py create mode 100644 nvflare/app_opt/flower/controller.py create mode 100644 nvflare/app_opt/flower/defs.py create mode 100644 nvflare/app_opt/flower/executor.py create mode 100644 nvflare/app_opt/flower/grpc_client.py create mode 100644 nvflare/app_opt/flower/grpc_server.py create mode 100644 nvflare/app_opt/flower/mock/__init__.py create mode 100644 nvflare/app_opt/flower/mock/applet.py create mode 100644 nvflare/app_opt/flower/mock/controller.py create mode 100644 nvflare/app_opt/flower/mock/echo_servicer.py create mode 100644 nvflare/app_opt/flower/mock/executor.py create mode 100644 nvflare/app_opt/flower/mock/flower_client.py create mode 100644 nvflare/app_opt/flower/mock/flower_server.py create mode 100644 nvflare/app_opt/flower/utils.py create mode 100644 nvflare/fuel/utils/grpc_utils.py diff --git a/examples/hello-world/hello-flower/README.md b/examples/hello-world/hello-flower/README.md new file mode 100644 index 0000000000..a0c303a832 --- /dev/null +++ b/examples/hello-world/hello-flower/README.md @@ -0,0 +1,28 @@ +# Flower App (PyTorch) in NVIDIA FLARE + +In this example, we run 2 Flower clients and Flower Server in parallel using NVFlare's simulator. + +## Preconditions + +To run Flower code in NVFlare, we created a job, including an app with the following custom folder content +```bash +$ tree jobs/hello-flwr-pt +. 
+├── client.py # <-- contains `ClientApp` +├── server.py # <-- contains `ServerApp` +├── task.py # <-- task-specific code (model, data) +``` +Note, this code is directly copied from Flower's [app-pytorch](https://github.com/adap/flower/tree/main/examples/app-pytorch) example. + +## Install dependencies +To run this job with NVFlare, we first need to install the dependencies. +```bash +pip install -r requirements.txt +``` + +## Run a simulation + +Next, we run 2 Flower clients and Flower Server in parallel using NVFlare's simulator. +```bash +nvflare simulator jobs/hello-flwr-pt -n 2 -t 2 -w /tmp/nvflare/flwr +``` diff --git a/examples/hello-world/hello-flower/jobs/hello-flwr-pt/app/config/config_fed_client.json b/examples/hello-world/hello-flower/jobs/hello-flwr-pt/app/config/config_fed_client.json new file mode 100644 index 0000000000..e1e74ade3f --- /dev/null +++ b/examples/hello-world/hello-flower/jobs/hello-flwr-pt/app/config/config_fed_client.json @@ -0,0 +1,17 @@ +{ + "format_version": 2, + "executors": [ + { + "tasks": ["*"], + "executor": { + "path": "nvflare.app_opt.flower.executor.FlowerExecutor", + "args": { + "client_app": "client:app" + } + } + } + ], + "task_result_filters": [], + "task_data_filters": [], + "components": [] +} \ No newline at end of file diff --git a/examples/hello-world/hello-flower/jobs/hello-flwr-pt/app/config/config_fed_server.json b/examples/hello-world/hello-flower/jobs/hello-flwr-pt/app/config/config_fed_server.json new file mode 100644 index 0000000000..dfb4cab82d --- /dev/null +++ b/examples/hello-world/hello-flower/jobs/hello-flwr-pt/app/config/config_fed_server.json @@ -0,0 +1,16 @@ +{ + "format_version": 2, + "task_data_filters": [], + "task_result_filters": [], + "components": [ + ], + "workflows": [ + { + "id": "ctl", + "path": "nvflare.app_opt.flower.controller.FlowerController", + "args": { + "server_app": "server:app" + } + } + ] +} \ No newline at end of file diff --git a/examples/hello-world/hello-flower/jobs/hello-flwr-pt/app/custom/client.py b/examples/hello-world/hello-flower/jobs/hello-flwr-pt/app/custom/client.py new file mode 100644 index 0000000000..9e674b27c3 --- /dev/null +++ b/examples/hello-world/hello-flower/jobs/hello-flwr-pt/app/custom/client.py @@ -0,0 +1,53 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from flwr.client import ClientApp, NumPyClient +from task import DEVICE, Net, get_weights, load_data, set_weights, test, train + +# Load model and data (simple CNN, CIFAR-10) +net = Net().to(DEVICE) +trainloader, testloader = load_data() + + +# Define FlowerClient and client_fn +class FlowerClient(NumPyClient): + def fit(self, parameters, config): + set_weights(net, parameters) + results = train(net, trainloader, testloader, epochs=1, device=DEVICE) + return get_weights(net), len(trainloader.dataset), results + + def evaluate(self, parameters, config): + set_weights(net, parameters) + loss, accuracy = test(net, testloader) + return loss, len(testloader.dataset), {"accuracy": accuracy} + + +def client_fn(cid: str): + """Create and return an instance of Flower `Client`.""" + return FlowerClient().to_client() + + +# Flower ClientApp +app = ClientApp( + client_fn=client_fn, +) + + +# Legacy mode +if __name__ == "__main__": + from flwr.client import start_client + + start_client( + server_address="127.0.0.1:8080", + client=FlowerClient().to_client(), + ) diff --git a/examples/hello-world/hello-flower/jobs/hello-flwr-pt/app/custom/server.py b/examples/hello-world/hello-flower/jobs/hello-flwr-pt/app/custom/server.py new file mode 100644 index 0000000000..8083a6b802 --- /dev/null +++ b/examples/hello-world/hello-flower/jobs/hello-flwr-pt/app/custom/server.py @@ -0,0 +1,75 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from typing import List, Tuple + +from flwr.common import Metrics, ndarrays_to_parameters +from flwr.server import ServerApp, ServerConfig +from flwr.server.strategy import FedAvg +from task import Net, get_weights + + +# Define metric aggregation function +def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: + examples = [num_examples for num_examples, _ in metrics] + + # Multiply accuracy of each client by number of examples used + train_losses = [num_examples * m["train_loss"] for num_examples, m in metrics] + train_accuracies = [num_examples * m["train_accuracy"] for num_examples, m in metrics] + val_losses = [num_examples * m["val_loss"] for num_examples, m in metrics] + val_accuracies = [num_examples * m["val_accuracy"] for num_examples, m in metrics] + + # Aggregate and return custom metric (weighted average) + return { + "train_loss": sum(train_losses) / sum(examples), + "train_accuracy": sum(train_accuracies) / sum(examples), + "val_loss": sum(val_losses) / sum(examples), + "val_accuracy": sum(val_accuracies) / sum(examples), + } + + +# Initialize model parameters +ndarrays = get_weights(Net()) +parameters = ndarrays_to_parameters(ndarrays) + + +# Define strategy +strategy = FedAvg( + fraction_fit=1.0, # Select all available clients + fraction_evaluate=0.0, # Disable evaluation + min_available_clients=2, + fit_metrics_aggregation_fn=weighted_average, + initial_parameters=parameters, +) + + +# Define config +config = ServerConfig(num_rounds=3) + + +# Flower ServerApp +app = ServerApp( + config=config, + strategy=strategy, +) + + +# Legacy mode +if __name__ == "__main__": + from flwr.server import start_server + + start_server( + server_address="0.0.0.0:8080", + config=config, + strategy=strategy, + ) diff --git a/examples/hello-world/hello-flower/jobs/hello-flwr-pt/app/custom/task.py b/examples/hello-world/hello-flower/jobs/hello-flwr-pt/app/custom/task.py new file mode 100644 index 0000000000..7a5c1a0514 --- /dev/null +++ b/examples/hello-world/hello-flower/jobs/hello-flwr-pt/app/custom/task.py @@ -0,0 +1,106 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from collections import OrderedDict +from logging import INFO + +import torch +import torch.nn as nn +import torch.nn.functional as F +from flwr.common.logger import log +from torch.utils.data import DataLoader +from torchvision.datasets import CIFAR10 +from torchvision.transforms import Compose, Normalize, ToTensor + +DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") + + +class Net(nn.Module): + """Model (simple CNN adapted from 'PyTorch: A 60 Minute Blitz')""" + + def __init__(self) -> None: + super(Net, self).__init__() + self.conv1 = nn.Conv2d(3, 6, 5) + self.pool = nn.MaxPool2d(2, 2) + self.conv2 = nn.Conv2d(6, 16, 5) + self.fc1 = nn.Linear(16 * 5 * 5, 120) + self.fc2 = nn.Linear(120, 84) + self.fc3 = nn.Linear(84, 10) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = self.pool(F.relu(self.conv1(x))) + x = self.pool(F.relu(self.conv2(x))) + x = x.view(-1, 16 * 5 * 5) + x = F.relu(self.fc1(x)) + x = F.relu(self.fc2(x)) + return self.fc3(x) + + +def load_data(): + """Load CIFAR-10 (training and test set).""" + trf = Compose([ToTensor(), Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) + trainset = CIFAR10("./data", train=True, download=True, transform=trf) + testset = CIFAR10("./data", train=False, download=True, transform=trf) + return DataLoader(trainset, batch_size=32, shuffle=True), DataLoader(testset) + + +def train(net, trainloader, valloader, epochs, device): + """Train the model on the training set.""" + log(INFO, "Starting training...") + net.to(device) # move model to GPU if available + criterion = torch.nn.CrossEntropyLoss().to(device) + optimizer = torch.optim.SGD(net.parameters(), lr=0.001, momentum=0.9) + net.train() + for _ in range(epochs): + for images, labels in trainloader: + images, labels = images.to(device), labels.to(device) + optimizer.zero_grad() + loss = criterion(net(images), labels) + loss.backward() + optimizer.step() + + train_loss, train_acc = test(net, trainloader) + val_loss, val_acc = test(net, valloader) + + results = { + "train_loss": train_loss, + "train_accuracy": train_acc, + "val_loss": val_loss, + "val_accuracy": val_acc, + } + return results + + +def test(net, testloader): + """Validate the model on the test set.""" + net.to(DEVICE) + criterion = torch.nn.CrossEntropyLoss() + correct, loss = 0, 0.0 + with torch.no_grad(): + for images, labels in testloader: + outputs = net(images.to(DEVICE)) + labels = labels.to(DEVICE) + loss += criterion(outputs, labels).item() + correct += (torch.max(outputs.data, 1)[1] == labels).sum().item() + accuracy = correct / len(testloader.dataset) + return loss, accuracy + + +def get_weights(net): + return [val.cpu().numpy() for _, val in net.state_dict().items()] + + +def set_weights(net, parameters): + params_dict = zip(net.state_dict().keys(), parameters) + state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict}) + net.load_state_dict(state_dict, strict=True) diff --git a/examples/hello-world/hello-flower/jobs/hello-flwr-pt/meta.json b/examples/hello-world/hello-flower/jobs/hello-flwr-pt/meta.json new file mode 100644 index 0000000000..90bc82e8ed --- /dev/null +++ b/examples/hello-world/hello-flower/jobs/hello-flwr-pt/meta.json @@ -0,0 +1,10 @@ +{ + "name": "hello-flwr-pt", + "resource_spec": {}, + "min_clients" : 2, + "deploy_map": { + "app": [ + "@ALL" + ] + } +} diff --git a/examples/hello-world/hello-flower/requirements.txt b/examples/hello-world/hello-flower/requirements.txt new file mode 100644 index 0000000000..1d8990f84a --- /dev/null +++ 
b/examples/hello-world/hello-flower/requirements.txt @@ -0,0 +1,4 @@ +nvflare~=2.5.0rc +flwr[simulation]>=1.8.0 +torch==2.2.1 +torchvision==0.17.1 diff --git a/nvflare/apis/fl_constant.py b/nvflare/apis/fl_constant.py index 9b9b844774..52005ec305 100644 --- a/nvflare/apis/fl_constant.py +++ b/nvflare/apis/fl_constant.py @@ -42,7 +42,6 @@ class ReturnCode(object): EARLY_TERMINATION = "EARLY_TERMINATION" SERVER_NOT_READY = "SERVER_NOT_READY" SERVICE_UNAVAILABLE = "SERVICE_UNAVAILABLE" - EARLY_TERMINATION = "EARLY_TERMINATION" class MachineStatus(Enum): @@ -494,6 +493,7 @@ class SystemVarName: JOB_ID = "JOB_ID" # Job ID ROOT_URL = "ROOT_URL" # the URL of the Service Provider (server) SECURE_MODE = "SECURE_MODE" # whether the system is running in secure mode + JOB_CUSTOM_DIR = "JOB_CUSTOM_DIR" # custom dir of the job class RunnerTask: diff --git a/nvflare/apis/utils/reliable_message.py b/nvflare/apis/utils/reliable_message.py index 71f7365847..bb5e01792c 100644 --- a/nvflare/apis/utils/reliable_message.py +++ b/nvflare/apis/utils/reliable_message.py @@ -98,6 +98,9 @@ def __init__(self, topic, request_handler_f, executor, per_msg_timeout, tx_timeo self.replying = False def process(self, request: Shareable, fl_ctx: FLContext) -> Shareable: + if not ReliableMessage.is_available(): + return make_reply(ReturnCode.SERVICE_UNAVAILABLE) + self.tx_id = request.get_header(HEADER_TX_ID) op = request.get_header(HEADER_OP) peer_ctx = fl_ctx.get_peer_context() @@ -111,9 +114,14 @@ def process(self, request: Shareable, fl_ctx: FLContext) -> Shareable: self.tx_timeout = request.get_header(HEADER_TX_TIMEOUT) # start processing - ReliableMessage.debug(fl_ctx, f"started processing request of topic {self.topic}") - self.executor.submit(self._do_request, request, fl_ctx) - return _status_reply(STATUS_IN_PROCESS) # ack + ReliableMessage.info(fl_ctx, f"started processing request of topic {self.topic}") + try: + self.executor.submit(self._do_request, request, fl_ctx) + return _status_reply(STATUS_IN_PROCESS) # ack + except Exception as ex: + # it is possible that the RM is already closed (self.executor is shut down) + ReliableMessage.error(fl_ctx, f"failed to submit request: {secure_format_exception(ex)}") + return make_reply(ReturnCode.SERVICE_UNAVAILABLE) elif self.result: # we already finished processing - send the result back ReliableMessage.info(fl_ctx, "resend result back to requester") @@ -169,6 +177,8 @@ def _try_reply(self, fl_ctx: FLContext): # release the receiver kept by the ReliableMessage! 
ReliableMessage.release_request_receiver(self, fl_ctx) else: + # unsure whether the reply was sent successfully + # do not release the request receiver in case the requester asks for result in a query ReliableMessage.error( fl_ctx, f"failed to send reply in {time_spent} secs: {rc=}; will wait for requester to query" ) @@ -192,6 +202,8 @@ def _do_request(self, request: Shareable, fl_ctx: FLContext): class _ReplyReceiver: + """This class handles reliable message replies on the sending end""" + def __init__(self, tx_id: str, per_msg_timeout: float, tx_timeout: float): self.tx_id = tx_id self.tx_start_time = time.time() @@ -425,6 +437,21 @@ def warning(cls, fl_ctx: FLContext, msg: str): def error(cls, fl_ctx: FLContext, msg: str): cls._logger.error(cls._log_msg(fl_ctx, msg)) + @classmethod + def is_available(cls): + """Return whether the ReliableMessage service is available + + Returns: + + """ + if cls._shutdown_asked: + return False + + if not cls._enabled: + return False + + return True + @classmethod def debug(cls, fl_ctx: FLContext, msg: str): cls._logger.debug(cls._log_msg(fl_ctx, msg)) @@ -614,7 +641,7 @@ def _query_result( fl_ctx=fl_ctx, ) - # Ignore query result if result is already received + # Ignore query result if reply result is already received if receiver.result_ready.is_set(): return receiver.result diff --git a/nvflare/app_common/tie/__init__.py b/nvflare/app_common/tie/__init__.py new file mode 100644 index 0000000000..d9155f923f --- /dev/null +++ b/nvflare/app_common/tie/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/nvflare/app_common/tie/applet.py b/nvflare/app_common/tie/applet.py new file mode 100644 index 0000000000..228e08a0c8 --- /dev/null +++ b/nvflare/app_common/tie/applet.py @@ -0,0 +1,70 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from abc import ABC, abstractmethod + +from nvflare.apis.fl_component import FLComponent +from nvflare.apis.fl_context import FLContext + + +class Applet(ABC, FLComponent): + + """An Applet implements App (server or client) processing logic.""" + + def __init__(self): + FLComponent.__init__(self) + + def initialize(self, fl_ctx: FLContext): + """Called by Controller/Executor to initialize the applet. + This happens when the job is about to start. 
+ + Args: + fl_ctx: FL context + + Returns: None + + """ + pass + + @abstractmethod + def start(self, app_ctx: dict): + """Called to start the execution of the applet. + + Args: + app_ctx: the contextual info to help the applet execution + + Returns: None + + """ + pass + + @abstractmethod + def stop(self, timeout=0.0) -> int: + """Called to stop the applet. + + Args: + timeout: the max amount of time (seconds) to stop the applet + + Returns: the exit code after stopped + + """ + pass + + @abstractmethod + def is_stopped(self) -> (bool, int): + """Called to check whether the applet is already stopped. + + Returns: whether the applet is stopped, and the exit code if stopped. + + """ + pass diff --git a/nvflare/app_common/tie/cli_applet.py b/nvflare/app_common/tie/cli_applet.py new file mode 100644 index 0000000000..cd17bf0eb7 --- /dev/null +++ b/nvflare/app_common/tie/cli_applet.py @@ -0,0 +1,109 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import time +from abc import ABC, abstractmethod + +from nvflare.security.logging import secure_format_exception + +from .applet import Applet +from .defs import Constant +from .process_mgr import CommandDescriptor, start_process + + +class CLIApplet(Applet, ABC): + def __init__(self): + """Constructor of CLIApplet, which runs the applet as a subprocess started with CLI command.""" + Applet.__init__(self) + self._proc_mgr = None + self._start_error = False + + @abstractmethod + def get_command(self, app_ctx: dict) -> CommandDescriptor: + """Subclass must implement this method to return the CLI command to be executed. + + Args: + app_ctx: the applet context that contains execution env info + + Returns: a CommandDescriptor that describes the CLI command + + """ + pass + + def start(self, app_ctx: dict): + """Start the execution of the applet. + + Args: + app_ctx: the applet run context + + Returns: + + """ + cmd_desc = self.get_command(app_ctx) + if not cmd_desc: + raise RuntimeError("failed to get cli command from app context") + + fl_ctx = app_ctx.get(Constant.APP_CTX_FL_CONTEXT) + try: + self._proc_mgr = start_process(cmd_desc, fl_ctx) + except Exception as ex: + self.logger.error(f"exception starting applet '{cmd_desc.cmd}': {secure_format_exception(ex)}") + self._start_error = True + + def stop(self, timeout=0.0) -> int: + """Stop the applet + + Args: + timeout: amount of time to wait for the applet to stop by itself. If the applet does not stop on + its own within this time, we'll forcefully stop it by kill. 
+ + Returns: exit code + + """ + mgr = self._proc_mgr + self._proc_mgr = None + + if not mgr: + raise RuntimeError("no process manager to stop") + + if timeout > 0: + # wait for the applet to stop by itself + start = time.time() + while time.time() - start < timeout: + rc = mgr.poll() + if rc is not None: + # already stopped + self.logger.info(f"applet stopped ({rc=}) after {time.time()-start} seconds") + break + time.sleep(0.1) + + rc = mgr.stop() + if rc is None: + self.logger.warning(f"killed the applet process after waiting {timeout} seconds") + return -9 + else: + return rc + + def is_stopped(self) -> (bool, int): + if self._start_error: + return True, Constant.EXIT_CODE_CANT_START + + mgr = self._proc_mgr + if mgr: + return_code = mgr.poll() + if return_code is None: + return False, 0 + else: + return True, return_code + else: + return True, 0 diff --git a/nvflare/app_common/tie/connector.py b/nvflare/app_common/tie/connector.py new file mode 100644 index 0000000000..8afa86aedb --- /dev/null +++ b/nvflare/app_common/tie/connector.py @@ -0,0 +1,264 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import threading +import time +from abc import ABC, abstractmethod +from typing import Optional + +from nvflare.apis.fl_component import FLComponent +from nvflare.apis.fl_context import FLContext +from nvflare.apis.shareable import Shareable +from nvflare.apis.signal import Signal +from nvflare.apis.utils.reliable_message import ReliableMessage +from nvflare.app_common.tie.applet import Applet +from nvflare.app_common.tie.defs import Constant +from nvflare.fuel.f3.cellnet.fqcn import FQCN +from nvflare.fuel.utils.validation_utils import check_object_type + + +class Connector(ABC, FLComponent): + """ + Connectors are used to integrate FLARE with an Applet (Server or Client) in run time. + Each type of applet requires an appropriate connector to integrate it with FLARE's Controller or Executor. + The Connector class defines commonly required methods for all Connector implementations. + """ + + def __init__(self): + """Constructor of Connector""" + FLComponent.__init__(self) + self.abort_signal = None + self.applet = None + self.engine = None + + def set_applet(self, applet: Applet): + """Set the applet that will be used to run app processing logic. + Note that the connector is only responsible for starting the applet appropriately (in a separate thread or in a + separate process). + + Args: + applet: the applet to be set + + Returns: None + + """ + if not isinstance(applet, Applet): + raise TypeError(f"applet must be Applet but got {type(applet)}") + self.applet = applet + + def set_abort_signal(self, abort_signal: Signal): + """Called by Controller/Executor to set the abort_signal. + + The abort_signal is assigned by FLARE Controller/Executor. It is used by the Controller/Executor + to tell the connector that the job has been aborted. + + Args: + abort_signal: the abort signal assigned by the caller. 
+ + Returns: None + + """ + check_object_type("abort_signal", abort_signal, Signal) + self.abort_signal = abort_signal + + def initialize(self, fl_ctx: FLContext): + """Called by the Controller/Executor to initialize the connector. + + Args: + fl_ctx: the FL context + + Returns: None + + """ + self.engine = fl_ctx.get_engine() + + @abstractmethod + def start(self, fl_ctx: FLContext): + """Called by Controller/Executor to start the connector. + If any error occurs, this method should raise an exception. + + Args: + fl_ctx: the FL context. + + Returns: None + + """ + pass + + @abstractmethod + def stop(self, fl_ctx: FLContext): + """Called by Controller/Executor to stop the connector. + If any error occurs, this method should raise an exception. + + Args: + fl_ctx: the FL context. + + Returns: None + + """ + pass + + @abstractmethod + def configure(self, config: dict, fl_ctx: FLContext): + """Called by Controller/Executor to configure the connector. + If any error occurs, this method should raise an exception. + + Args: + config: config data + fl_ctx: the FL context + + Returns: None + + """ + pass + + def _is_stopped(self) -> (bool, int): + """Called by the connector's monitor to know whether the connector is stopped. + Note that this method is not called by Controller/Executor. + + Returns: a tuple of: whether the connector is stopped, and return code (if stopped) + + Note that a non-zero return code is considered abnormal completion of the connector. + + """ + return self.is_applet_stopped() + + def _monitor(self, fl_ctx: FLContext, connector_stopped_cb): + while True: + if self.abort_signal.triggered: + # asked to abort + self.stop(fl_ctx) + return + + stopped, rc = self._is_stopped() + if stopped: + # connector already stopped - notify the caller + connector_stopped_cb(rc, fl_ctx) + return + + time.sleep(0.1) + + def monitor(self, fl_ctx: FLContext, connector_stopped_cb): + """Called by Controller/Executor to monitor the health of the connector. + + The monitor periodically checks the abort signal. Once set, it calls the connector's stop() method + to stop the running of the app. + + The monitor also periodically checks whether the connector is already stopped (by calling the is_stopped + method). If the connector is stopped, the monitor will call the specified connector_stopped_cb. + + Args: + fl_ctx: FL context + connector_stopped_cb: the callback function to be called when the connector is stopped. + + Returns: None + + """ + if not callable(connector_stopped_cb): + raise RuntimeError(f"connector_stopped_cb must be callable but got {type(connector_stopped_cb)}") + + # start the monitor in a separate daemon thread! + t = threading.Thread(target=self._monitor, args=(fl_ctx, connector_stopped_cb), daemon=True) + t.start() + + def start_applet(self, app_ctx: dict, fl_ctx: FLContext): + """Start the applet set to the connector. 
+ + Args: + app_ctx: the contextual info for running the applet + fl_ctx: FL context + + Returns: None + + """ + if not self.applet: + raise RuntimeError("applet has not been set!") + + app_ctx[Constant.APP_CTX_FL_CONTEXT] = fl_ctx + self.applet.start(app_ctx) + + def stop_applet(self, timeout=0.0) -> int: + """Stop the running of the applet + + Returns: exit code of the applet + + """ + return self.applet.stop(timeout) + + def is_applet_stopped(self) -> (bool, int): + """Check whether the applet is already stopped + + Returns: a tuple of (whether the applet is stopped, exit code) + + """ + applet = self.applet + if applet: + return applet.is_stopped() + else: + self.logger.warning("applet is not set with the connector") + return True, 0 + + def send_request( + self, + target: Optional[str], + op: str, + request: Shareable, + per_msg_timeout: float, + tx_timeout: float, + fl_ctx: Optional[FLContext], + ) -> Shareable: + """Send app request to the specified target via FLARE ReliableMessage. + + Args: + target: the destination of the request. If not specified, default to server. + op: the operation + request: operation data + per_msg_timeout: per-message timeout + tx_timeout: transaction timeout + fl_ctx: FL context. If not provided, this method will create a new FL context. + + Returns: + operation result + """ + request.set_header(Constant.MSG_KEY_OP, op) + if not target: + target = FQCN.ROOT_SERVER + + if not fl_ctx: + fl_ctx = self.engine.new_context() + + self.logger.debug(f"sending request with RM: {op=}") + return ReliableMessage.send_request( + target=target, + topic=Constant.TOPIC_APP_REQUEST, + request=request, + per_msg_timeout=per_msg_timeout, + tx_timeout=tx_timeout, + abort_signal=self.abort_signal, + fl_ctx=fl_ctx, + ) + + def process_app_request(self, op: str, req: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable: + """Called by Controller/Executor to process a request from an applet on another site. + + Args: + op: the op code of the request + req: the request to be sent + fl_ctx: FL context + abort_signal: abort signal that could be triggered during the request processing + + Returns: processing result as Shareable object + + """ + pass diff --git a/nvflare/app_common/tie/controller.py b/nvflare/app_common/tie/controller.py new file mode 100644 index 0000000000..0ebb39b93e --- /dev/null +++ b/nvflare/app_common/tie/controller.py @@ -0,0 +1,565 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
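For orientation, the Connector base class introduced above can be subclassed roughly as sketched below. This is an illustrative sketch only, not part of the patch: the class name is hypothetical, and it assumes the applet has already been attached via set_applet() by the owning Controller/Executor.

    from nvflare.apis.fl_context import FLContext
    from nvflare.app_common.tie.connector import Connector


    class InProcessConnector(Connector):
        """Hypothetical connector that runs the attached applet in-process."""

        def configure(self, config: dict, fl_ctx: FLContext):
            # this sketch needs no configuration
            pass

        def start(self, fl_ctx: FLContext):
            # start_applet() adds the FL context to the app context and calls applet.start()
            self.start_applet({}, fl_ctx)

        def stop(self, fl_ctx: FLContext):
            # give the applet up to 5 seconds to stop on its own
            self.stop_applet(timeout=5.0)
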
+import threading +import time +from abc import ABC, abstractmethod + +from nvflare.apis.client import Client +from nvflare.apis.controller_spec import ClientTask, Task +from nvflare.apis.fl_context import FLContext +from nvflare.apis.impl.controller import Controller +from nvflare.apis.shareable import ReturnCode, Shareable, make_reply +from nvflare.apis.signal import Signal +from nvflare.apis.utils.reliable_message import ReliableMessage +from nvflare.app_common.tie.connector import Connector +from nvflare.fuel.utils.validation_utils import check_number_range, check_positive_number +from nvflare.security.logging import secure_format_exception + +from .applet import Applet +from .defs import Constant + + +class _ClientStatus: + """ + Objects of this class keep processing status of each FL client during job execution. + """ + + def __init__(self): + # Set when the client's config reply is received and the reply return code is OK. + # If the client failed to reply or the return code is not OK, this value is not set. + self.configured_time = None + + # Set when the client's start reply is received and the reply return code is OK. + # If the client failed to reply or the return code is not OK, this value is not set. + self.started_time = None + + # operation of the last request from this client + self.last_op = None + + # time of the last op request from this client + self.last_op_time = time.time() + + # whether the app process is finished on this client + self.app_done = False + + +class TieController(Controller, ABC): + def __init__( + self, + configure_task_name=Constant.CONFIG_TASK_NAME, + configure_task_timeout=Constant.CONFIG_TASK_TIMEOUT, + start_task_name=Constant.START_TASK_NAME, + start_task_timeout=Constant.START_TASK_TIMEOUT, + job_status_check_interval: float = Constant.JOB_STATUS_CHECK_INTERVAL, + max_client_op_interval: float = Constant.MAX_CLIENT_OP_INTERVAL, + progress_timeout: float = Constant.WORKFLOW_PROGRESS_TIMEOUT, + ): + """ + Constructor + + Args: + configure_task_name - name of the config task + configure_task_timeout - time to wait for clients’ responses to the config task before timeout. + start_task_name - name of the start task + start_task_timeout - time to wait for clients’ responses to the start task before timeout. + job_status_check_interval - how often to check client statuses of the job + max_client_op_interval - max amount of time allowed between app ops from a client + progress_timeout- the maximum amount of time allowed for the workflow to not make any progress. + In other words, at least one participating client must have made progress during this time. + Otherwise, the workflow will be considered to be in trouble and the job will be aborted. 
+ """ + Controller.__init__(self) + self.configure_task_name = configure_task_name + self.start_task_name = start_task_name + self.start_task_timeout = start_task_timeout + self.configure_task_timeout = configure_task_timeout + self.max_client_op_interval = max_client_op_interval + self.progress_timeout = progress_timeout + self.job_status_check_interval = job_status_check_interval + + self.connector = None + self.participating_clients = None + self.status_lock = threading.Lock() + self.client_statuses = {} # client name => ClientStatus + self.abort_signal = None + + check_number_range("configure_task_timeout", configure_task_timeout, min_value=1) + check_number_range("start_task_timeout", start_task_timeout, min_value=1) + check_positive_number("job_status_check_interval", job_status_check_interval) + check_number_range("max_client_op_interval", max_client_op_interval, min_value=10.0) + check_number_range("progress_timeout", progress_timeout, min_value=5.0) + + @abstractmethod + def get_client_config_params(self, fl_ctx: FLContext) -> dict: + """Called by the TieController to get config parameters to be sent to FL clients. + Subclass of TieController must implement this method. + + Args: + fl_ctx: FL context + + Returns: a dict of config params + + """ + pass + + @abstractmethod + def get_connector_config_params(self, fl_ctx: FLContext) -> dict: + """Called by the TieController to get config parameters for configuring the connector. + Subclass of TieController must implement this method. + + Args: + fl_ctx: FL context + + Returns: a dict of config params + + """ + pass + + @abstractmethod + def get_connector(self, fl_ctx: FLContext) -> Connector: + """Called by the TieController to get the Connector to be used with the controller. + Subclass of TieController must implement this method. + + Args: + fl_ctx: FL context + + Returns: a Connector object + + """ + pass + + @abstractmethod + def get_applet(self, fl_ctx: FLContext) -> Applet: + """Called by the TieController to get the Applet to be used with the controller. + Subclass of TieController must implement this method. + + Args: + fl_ctx: FL context + + Returns: an Applet object + + """ + pass + + def start_controller(self, fl_ctx: FLContext): + """Start the controller. + It first tries to get the connector and applet to be used. + It then initializes the applet, set the applet to the connector, and initializes the connector. + It finally registers message handlers for APP_REQUEST and CLIENT_DONE. + If error occurs in any step, the job is stopped. + + Note: if a subclass overwrites this method, it must call super().start_controller()! 
+ + Args: + fl_ctx: the FL context + + Returns: None + + """ + all_clients = self._engine.get_clients() + self.participating_clients = [t.name for t in all_clients] + + for c in self.participating_clients: + self.client_statuses[c] = _ClientStatus() + + connector = self.get_connector(fl_ctx) + if not connector: + self.system_panic("cannot get connector", fl_ctx) + return None + + if not isinstance(connector, Connector): + self.system_panic( + f"invalid connector: expect Connector but got {type(connector)}", + fl_ctx, + ) + return None + + applet = self.get_applet(fl_ctx) + if not applet: + self.system_panic("cannot get applet", fl_ctx) + return + + if not isinstance(applet, Applet): + self.system_panic( + f"invalid applet: expect Applet but got {type(applet)}", + fl_ctx, + ) + return + + applet.initialize(fl_ctx) + connector.set_applet(applet) + connector.initialize(fl_ctx) + self.connector = connector + + engine = fl_ctx.get_engine() + engine.register_aux_message_handler( + topic=Constant.TOPIC_CLIENT_DONE, + message_handle_func=self._process_client_done, + ) + ReliableMessage.register_request_handler( + topic=Constant.TOPIC_APP_REQUEST, + handler_f=self._handle_app_request, + fl_ctx=fl_ctx, + ) + + def _trigger_stop(self, fl_ctx: FLContext, error=None): + # first trigger the abort_signal to tell all components (mainly the controller's control_flow and connector) + # that check this signal to abort. + if self.abort_signal: + self.abort_signal.trigger(value=True) + + # if there is error, call system_panic to terminate the job with proper status. + # if no error, the job will end normally. + if error: + self.system_panic(reason=error, fl_ctx=fl_ctx) + + def _is_stopped(self): + # check whether the abort signal is triggered + return self.abort_signal and self.abort_signal.triggered + + def _update_client_status(self, fl_ctx: FLContext, op=None, client_done=False): + """Update the status of the requesting client. + + Args: + fl_ctx: FL context + op: the app operation requested + client_done: whether the client is done + + Returns: None + + """ + with self.status_lock: + peer_ctx = fl_ctx.get_peer_context() + if not peer_ctx: + self.log_error(fl_ctx, "missing peer_ctx from fl_ctx") + return + if not isinstance(peer_ctx, FLContext): + self.log_error(fl_ctx, f"expect peer_ctx to be FLContext but got {type(peer_ctx)}") + return + client_name = peer_ctx.get_identity_name() + if not client_name: + self.log_error(fl_ctx, "missing identity from peer_ctx") + return + status = self.client_statuses.get(client_name) + if not status: + self.log_error(fl_ctx, f"no status record for client {client_name}") + assert isinstance(status, _ClientStatus) + if op: + status.last_op = op + if client_done: + status.app_done = client_done + status.last_op_time = time.time() + + def _process_client_done(self, topic: str, request: Shareable, fl_ctx: FLContext) -> Shareable: + """Process the ClientDone report for a client + + Args: + topic: topic of the message + request: request to be processed + fl_ctx: the FL context + + Returns: reply to the client + + """ + self.log_debug(fl_ctx, f"_process_client_done {topic}") + exit_code = request.get(Constant.MSG_KEY_EXIT_CODE) + + if exit_code == 0: + self.log_info(fl_ctx, f"app client is done with exit code {exit_code}") + elif exit_code == Constant.EXIT_CODE_CANT_START: + self.log_error(fl_ctx, f"app client failed to start (exit code {exit_code})") + self.system_panic("app client failed to start", fl_ctx) + else: + # Should we stop here? 
+ # Problem is that even if the exit_code is not 0, we can't say the job failed. + self.log_warning(fl_ctx, f"app client is done with exit code {exit_code}") + + self._update_client_status(fl_ctx, client_done=True) + return make_reply(ReturnCode.OK) + + def _handle_app_request(self, topic: str, request: Shareable, fl_ctx: FLContext) -> Shareable: + """Handle app request from applets on other sites + It calls the connector to process the app request. If the connector fails to process the request, the + job will be stopped. + + Args: + topic: message topic + request: the request data + fl_ctx: FL context + + Returns: processing result as a Shareable object + + """ + self.log_debug(fl_ctx, f"_handle_app_request {topic}") + op = request.get_header(Constant.MSG_KEY_OP) + if self._is_stopped(): + self.log_warning(fl_ctx, f"dropped app request ({op=}) since server is already stopped") + return make_reply(ReturnCode.SERVICE_UNAVAILABLE) + + # we assume app protocol to be very strict, we'll stop the control flow when any error occurs + process_error = "app request process error" + self._update_client_status(fl_ctx, op=op) + try: + reply = self.connector.process_app_request(op, request, fl_ctx, self.abort_signal) + except Exception as ex: + self.log_exception(fl_ctx, f"exception processing app request {op=}: {secure_format_exception(ex)}") + self._trigger_stop(fl_ctx, process_error) + return make_reply(ReturnCode.EXECUTION_EXCEPTION) + + self.log_info(fl_ctx, f"received reply for app request '{op=}'") + reply.set_header(Constant.MSG_KEY_OP, op) + return reply + + def _configure_clients(self, abort_signal: Signal, fl_ctx: FLContext): + self.log_info(fl_ctx, f"Configuring clients {self.participating_clients}") + + try: + config = self.get_client_config_params(fl_ctx) + except Exception as ex: + self.system_panic(f"exception get_client_config_params: {secure_format_exception(ex)}", fl_ctx) + return False + + if config is None: + self.system_panic("no config data is returned", fl_ctx) + return False + + shareable = Shareable() + shareable[Constant.MSG_KEY_CONFIG] = config + + task = Task( + name=self.configure_task_name, + data=shareable, + timeout=self.configure_task_timeout, + result_received_cb=self._process_configure_reply, + ) + + self.log_info(fl_ctx, f"sending task {self.configure_task_name} to clients {self.participating_clients}") + start_time = time.time() + self.broadcast_and_wait( + task=task, + targets=self.participating_clients, + min_responses=len(self.participating_clients), + fl_ctx=fl_ctx, + abort_signal=abort_signal, + ) + + time_taken = time.time() - start_time + self.log_info(fl_ctx, f"client configuration took {time_taken} seconds") + + failed_clients = [] + for c, cs in self.client_statuses.items(): + assert isinstance(cs, _ClientStatus) + if not cs.configured_time: + failed_clients.append(c) + + # if any client failed to configure, terminate the job + if failed_clients: + self.system_panic(f"failed to configure clients {failed_clients}", fl_ctx) + return False + + self.log_info(fl_ctx, f"successfully configured clients {self.participating_clients}") + return True + + def _start_clients(self, abort_signal: Signal, fl_ctx: FLContext): + self.log_info(fl_ctx, f"Starting clients {self.participating_clients}") + + task = Task( + name=self.start_task_name, + data=Shareable(), + timeout=self.start_task_timeout, + result_received_cb=self._process_start_reply, + ) + + self.log_info(fl_ctx, f"sending task {self.start_task_name} to clients {self.participating_clients}") + start_time = 
time.time() + self.broadcast_and_wait( + task=task, + targets=self.participating_clients, + min_responses=len(self.participating_clients), + fl_ctx=fl_ctx, + abort_signal=abort_signal, + ) + + time_taken = time.time() - start_time + self.log_info(fl_ctx, f"client starting took {time_taken} seconds") + + failed_clients = [] + for c, cs in self.client_statuses.items(): + assert isinstance(cs, _ClientStatus) + if not cs.started_time: + failed_clients.append(c) + + # if any client failed to start, terminate the job + if failed_clients: + self.system_panic(f"failed to start clients {failed_clients}", fl_ctx) + return False + + self.log_info(fl_ctx, f"successfully started clients {self.participating_clients}") + return True + + def control_flow(self, abort_signal: Signal, fl_ctx: FLContext): + """ + To ensure smooth app execution: + - ensure that all clients are online and ready to go before starting server + - ensure that server is started and ready to take requests before asking clients to start operation + - monitor the health of the clients + - if anything goes wrong, terminate the job + + Args: + abort_signal: abort signal that is used to notify components to abort + fl_ctx: FL context + + Returns: None + + """ + self.abort_signal = abort_signal + + # the connector uses the same abort signal! + self.connector.set_abort_signal(abort_signal) + + # wait for every client to become online and properly configured + self.log_info(fl_ctx, f"Waiting for clients to be ready: {self.participating_clients}") + + # configure all clients + if not self._configure_clients(abort_signal, fl_ctx): + self.system_panic("failed to configure all clients", fl_ctx) + return + + # configure and start the connector + try: + config = self.get_connector_config_params(fl_ctx) + self.connector.configure(config, fl_ctx) + self.log_info(fl_ctx, "starting connector ...") + self.connector.start(fl_ctx) + except Exception as ex: + error = f"failed to start connector: {secure_format_exception(ex)}" + self.log_error(fl_ctx, error) + self.system_panic(error, fl_ctx) + return + + self.connector.monitor(fl_ctx, self._app_stopped) + + # start all clients + if not self._start_clients(abort_signal, fl_ctx): + self.system_panic("failed to start all clients", fl_ctx) + return + + # monitor client health + # we periodically check job status until all clients are done or the system is stopped + self.log_info(fl_ctx, "Waiting for clients to finish ...") + while not self._is_stopped(): + done = self._check_job_status(fl_ctx) + if done: + break + time.sleep(self.job_status_check_interval) + + def _app_stopped(self, rc, fl_ctx: FLContext): + # This CB is called when app server is stopped + error = None + if rc != 0: + self.log_error(fl_ctx, f"App Server stopped abnormally with code {rc}") + error = "App server abnormal stop" + + # the app server could stop at any moment, we trigger the abort_signal in case it is checked by any + # other components + self._trigger_stop(fl_ctx, error) + + def _process_configure_reply(self, client_task: ClientTask, fl_ctx: FLContext): + result = client_task.result + client_name = client_task.client.name + + rc = result.get_return_code() + if rc == ReturnCode.OK: + self.log_info(fl_ctx, f"successfully configured client {client_name}") + cs = self.client_statuses.get(client_name) + if cs: + assert isinstance(cs, _ClientStatus) + cs.configured_time = time.time() + else: + self.log_error(fl_ctx, f"client {client_task.client.name} failed to configure: {rc}") + + def _process_start_reply(self, client_task: ClientTask, 
fl_ctx: FLContext): + result = client_task.result + client_name = client_task.client.name + + rc = result.get_return_code() + if rc == ReturnCode.OK: + self.log_info(fl_ctx, f"successfully started client {client_name}") + cs = self.client_statuses.get(client_name) + if cs: + assert isinstance(cs, _ClientStatus) + cs.started_time = time.time() + else: + self.log_error(fl_ctx, f"client {client_name} failed to start") + + def _check_job_status(self, fl_ctx: FLContext) -> bool: + """Check job status and determine whether the job is done. + + Args: + fl_ctx: FL context + + Returns: whether the job is considered done. + + """ + now = time.time() + + # overall_last_progress_time is the latest time that any client made progress. + overall_last_progress_time = 0.0 + clients_done = 0 + for client_name, cs in self.client_statuses.items(): + assert isinstance(cs, _ClientStatus) + + if cs.app_done: + self.log_info(fl_ctx, f"client {client_name} is Done") + clients_done += 1 + elif now - cs.last_op_time > self.max_client_op_interval: + self.system_panic( + f"client {client_name} didn't have any activity for {self.max_client_op_interval} seconds", + fl_ctx, + ) + return True + + if overall_last_progress_time < cs.last_op_time: + overall_last_progress_time = cs.last_op_time + + if clients_done == len(self.client_statuses): + # all clients are done - the job is considered done + return True + elif time.time() - overall_last_progress_time > self.progress_timeout: + # there has been no progress from any client for too long. + # this could be because the clients got stuck. + # consider the job done and abort the job. + self.system_panic(f"the job has no progress for {self.progress_timeout} seconds", fl_ctx) + return True + return False + + def process_result_of_unknown_task( + self, client: Client, task_name: str, client_task_id: str, result: Shareable, fl_ctx: FLContext + ): + self.log_warning(fl_ctx, f"ignored unknown task {task_name} from client {client.name}") + + def stop_controller(self, fl_ctx: FLContext): + """This is called by base controller to stop. + If a subclass overwrites this method, it must call super().stop_controller(fl_ctx). + + Args: + fl_ctx: + + Returns: + + """ + if self.connector: + self.log_info(fl_ctx, "Stopping server connector ...") + self.connector.stop(fl_ctx) + self.log_info(fl_ctx, "Server connector stopped") diff --git a/nvflare/app_common/tie/defs.py b/nvflare/app_common/tie/defs.py new file mode 100644 index 0000000000..b06bb69042 --- /dev/null +++ b/nvflare/app_common/tie/defs.py @@ -0,0 +1,43 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
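To make the abstract hooks of TieController concrete, a minimal subclass could look like the sketch below. All class names and config values here are placeholders invented for illustration; a real integration would return its own Connector and Applet implementations.

    from nvflare.apis.fl_context import FLContext
    from nvflare.app_common.tie.applet import Applet
    from nvflare.app_common.tie.connector import Connector
    from nvflare.app_common.tie.controller import TieController


    class _NoOpApplet(Applet):
        # placeholder applet, present only to keep the sketch self-contained
        def start(self, app_ctx: dict):
            pass

        def stop(self, timeout=0.0) -> int:
            return 0

        def is_stopped(self) -> (bool, int):
            return True, 0


    class _NoOpConnector(Connector):
        # placeholder connector, present only to keep the sketch self-contained
        def configure(self, config: dict, fl_ctx: FLContext):
            pass

        def start(self, fl_ctx: FLContext):
            self.start_applet({}, fl_ctx)

        def stop(self, fl_ctx: FLContext):
            self.stop_applet()


    class MyTieController(TieController):
        # hypothetical controller wiring the placeholder connector and applet together
        def get_client_config_params(self, fl_ctx: FLContext) -> dict:
            return {"example_param": 1}  # arbitrary config sent to clients

        def get_connector_config_params(self, fl_ctx: FLContext) -> dict:
            return {"example_param": 1}  # arbitrary config for the server connector

        def get_connector(self, fl_ctx: FLContext) -> Connector:
            return _NoOpConnector()

        def get_applet(self, fl_ctx: FLContext) -> Applet:
            return _NoOpApplet()
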
+ + +class Constant: + + # task name defaults + CONFIG_TASK_NAME = "config" + START_TASK_NAME = "start" + + # default component config values + CONFIG_TASK_TIMEOUT = 10 + START_TASK_TIMEOUT = 10 + + TASK_CHECK_INTERVAL = 0.5 + JOB_STATUS_CHECK_INTERVAL = 2.0 + MAX_CLIENT_OP_INTERVAL = 90.0 + WORKFLOW_PROGRESS_TIMEOUT = 3600.0 + + # message topics + TOPIC_APP_REQUEST = "tie.request" + TOPIC_CLIENT_DONE = "tie.client_done" + + # keys for Shareable between client and server + MSG_KEY_EXIT_CODE = "tie.exit_code" + MSG_KEY_OP = "tie.op" + MSG_KEY_CONFIG = "tie.config" + + EXIT_CODE_CANT_START = 101 + EXIT_CODE_FATAL_ERROR = 102 + + APP_CTX_FL_CONTEXT = "tie.fl_context" diff --git a/nvflare/app_common/tie/executor.py b/nvflare/app_common/tie/executor.py new file mode 100644 index 0000000000..f40bca9898 --- /dev/null +++ b/nvflare/app_common/tie/executor.py @@ -0,0 +1,197 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from abc import abstractmethod + +from nvflare.apis.event_type import EventType +from nvflare.apis.executor import Executor +from nvflare.apis.fl_constant import ReturnCode +from nvflare.apis.fl_context import FLContext +from nvflare.apis.shareable import Shareable, make_reply +from nvflare.apis.signal import Signal +from nvflare.app_common.tie.applet import Applet +from nvflare.app_common.tie.connector import Connector +from nvflare.fuel.f3.cellnet.fqcn import FQCN +from nvflare.security.logging import secure_format_exception + +from .defs import Constant + + +class TieExecutor(Executor): + def __init__( + self, + configure_task_name=Constant.CONFIG_TASK_NAME, + start_task_name=Constant.START_TASK_NAME, + ): + """Constructor + + Args: + configure_task_name: name of the config task + start_task_name: name of the start task + """ + Executor.__init__(self) + self.configure_task_name = configure_task_name + self.start_task_name = start_task_name + self.connector = None + self.engine = None + + # create the abort signal to be used for signaling the connector + self.abort_signal = Signal() + + @abstractmethod + def get_connector(self, fl_ctx: FLContext) -> Connector: + """Called by the TieExecutor to get the Connector to be used by this executor. + A subclass of TieExecutor must implement this method. + + Args: + fl_ctx: the FL context + + Returns: a Connector object + + """ + pass + + @abstractmethod + def get_applet(self, fl_ctx: FLContext) -> Applet: + """Called by the TieExecutor to get the Applet to be used by this executor. + A subclass of TieExecutor must implement this method. + + Args: + fl_ctx: the FL context + + Returns: an Applet object + + """ + pass + + def configure(self, config: dict, fl_ctx: FLContext): + """Called by the TieExecutor to configure the executor based on the config params received from the server. + A subclass of TieExecutor should implement this method. 
+ + Args: + config: the config data + fl_ctx: FL context + + Returns: None + + """ + pass + + def get_connector_config(self, fl_ctx: FLContext) -> dict: + """Called by the TieExecutor to get config params for the connector. + A subclass of TieExecutor should implement this method. + Note that this method is always called after the "configure" method, hence it's possible to dynamically + determine the connector's config based on the config params in the "configure" step. + + Args: + fl_ctx: the FL context + + Returns: a dict of config params + + """ + return {} + + def handle_event(self, event_type: str, fl_ctx: FLContext): + if event_type == EventType.START_RUN: + self.engine = fl_ctx.get_engine() + connector = self.get_connector(fl_ctx) + if not connector: + self.system_panic("cannot get connector", fl_ctx) + return + + if not isinstance(connector, Connector): + self.system_panic( + f"invalid connector: expect Connector but got {type(connector)}", + fl_ctx, + ) + return + + applet = self.get_applet(fl_ctx) + if not applet: + self.system_panic("cannot get applet", fl_ctx) + return + + if not isinstance(applet, Applet): + self.system_panic( + f"invalid applet: expect Applet but got {type(applet)}", + fl_ctx, + ) + return + + applet.initialize(fl_ctx) + connector.set_abort_signal(self.abort_signal) + connector.set_applet(applet) + connector.initialize(fl_ctx) + self.connector = connector + elif event_type == EventType.FATAL_SYSTEM_ERROR: + # notify server that the client is done + self._notify_client_done(Constant.EXIT_CODE_FATAL_ERROR, fl_ctx) + elif event_type == EventType.END_RUN: + self.abort_signal.trigger(True) + + def execute(self, task_name: str, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable: + if task_name == self.configure_task_name: + config = shareable.get(Constant.MSG_KEY_CONFIG) + if config is None: + self.log_error(fl_ctx, f"missing {Constant.MSG_KEY_CONFIG} from config") + return make_reply(ReturnCode.BAD_TASK_DATA) + + self.configure(config, fl_ctx) + + # configure the connector + connector_config = self.get_connector_config(fl_ctx) + self.connector.configure(connector_config, fl_ctx) + return make_reply(ReturnCode.OK) + elif task_name == self.start_task_name: + # start the connector + try: + self.connector.start(fl_ctx) + except Exception as ex: + self.log_exception(fl_ctx, f"failed to start connector: {secure_format_exception(ex)}") + return make_reply(ReturnCode.EXECUTION_EXCEPTION) + + # start to monitor the connector and applet + self.connector.monitor(fl_ctx, self._notify_client_done) + return make_reply(ReturnCode.OK) + else: + self.log_error(fl_ctx, f"ignored unsupported {task_name}") + return make_reply(ReturnCode.TASK_UNSUPPORTED) + + def _notify_client_done(self, rc, fl_ctx: FLContext): + """This is called when app is done. + We send a message to the FL server telling it that this client is done. 
+ + Args: + rc: the return/exit code + fl_ctx: FL context + + Returns: None + + """ + if rc != 0: + self.log_error(fl_ctx, f"App stopped with RC {rc}") + else: + self.log_info(fl_ctx, "App Stopped") + + # tell server that this client is done + engine = fl_ctx.get_engine() + req = Shareable() + req[Constant.MSG_KEY_EXIT_CODE] = rc + engine.send_aux_request( + targets=[FQCN.ROOT_SERVER], + topic=Constant.TOPIC_CLIENT_DONE, + request=req, + timeout=0, # fire and forget + fl_ctx=fl_ctx, + optional=True, + ) diff --git a/nvflare/app_common/tie/process_mgr.py b/nvflare/app_common/tie/process_mgr.py new file mode 100644 index 0000000000..e8c300925f --- /dev/null +++ b/nvflare/app_common/tie/process_mgr.py @@ -0,0 +1,209 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import os +import shlex +import subprocess +import sys +import threading + +from nvflare.apis.fl_constant import FLContextKey +from nvflare.apis.fl_context import FLContext +from nvflare.apis.workspace import Workspace +from nvflare.fuel.utils.obj_utils import get_logger +from nvflare.fuel.utils.validation_utils import check_object_type, check_str + + +class CommandDescriptor: + def __init__( + self, + cmd: str, + cwd=None, + env=None, + log_file_name: str = "", + log_stdout: bool = True, + stdout_msg_prefix: str = None, + ): + """Constructor of CommandDescriptor. + A CommandDescriptor describes the requirements of the new process to be started. + + Args: + cmd: the command to be executed to start the new process + cwd: current work dir for the new process + env: system env for the new process + log_file_name: base name of the log file. + log_stdout: whether to output log messages to stdout. + stdout_msg_prefix: prefix to be prepended to log message when writing to stdout. + Since multiple processes could be running within the same terminal window, the prefix can help + differentiate log messages from these processes. + """ + check_str("cmd", cmd) + + if cwd: + check_str("cwd", cwd) + + if env: + check_object_type("env", env, dict) + + if log_file_name: + check_str("log_file_name", log_file_name) + + if stdout_msg_prefix: + check_str("stdout_msg_prefix", stdout_msg_prefix) + + self.cmd = cmd + self.cwd = cwd + self.env = env + self.log_file_name = log_file_name + self.log_stdout = log_stdout + self.stdout_msg_prefix = stdout_msg_prefix + + +class ProcessManager: + def __init__(self, cmd_desc: CommandDescriptor): + """Constructor of ProcessManager. + ProcessManager provides methods for managing the lifecycle of a subprocess (start, stop, poll), as well + as the handling of log file to be used by the subprocess. + + Args: + cmd_desc: the CommandDescriptor that describes the command of the new process to be started + + NOTE: the methods of ProcessManager are not thread safe. 
+ + """ + check_object_type("cmd_desc", cmd_desc, CommandDescriptor) + self.process = None + self.cmd_desc = cmd_desc + self.log_file = None + self.msg_prefix = None + self.file_lock = threading.Lock() + self.logger = get_logger(self) + + def start( + self, + fl_ctx: FLContext, + ): + """Start the new process. + + Args: + fl_ctx: FLContext object. + + Returns: None + + """ + job_id = fl_ctx.get_job_id() + + if self.cmd_desc.stdout_msg_prefix: + site_name = fl_ctx.get_identity_name() + self.msg_prefix = f"[{self.cmd_desc.stdout_msg_prefix}@{site_name}]" + + if self.cmd_desc.log_file_name: + ws = fl_ctx.get_prop(FLContextKey.WORKSPACE_OBJECT) + if not isinstance(ws, Workspace): + self.logger.error( + f"FL context prop {FLContextKey.WORKSPACE_OBJECT} should be Workspace but got {type(ws)}" + ) + raise RuntimeError("bad FLContext object") + + run_dir = ws.get_run_dir(job_id) + log_file_path = os.path.join(run_dir, self.cmd_desc.log_file_name) + self.log_file = open(log_file_path, "a") + + env = os.environ.copy() + if self.cmd_desc.env: + env.update(self.cmd_desc.env) + + command_seq = shlex.split(self.cmd_desc.cmd) + self.process = subprocess.Popen( + command_seq, + stderr=subprocess.STDOUT, + cwd=self.cmd_desc.cwd, + env=env, + stdout=subprocess.PIPE, + ) + + log_writer = threading.Thread(target=self._write_log, daemon=True) + log_writer.start() + + def _write_log(self): + # write messages from the process's stdout pipe to log file and sys.stdout. + # note that depending on how the process flushes out its output, the messages may be buffered/delayed. + while True: + line = self.process.stdout.readline() + if not line: + break + + assert isinstance(line, bytes) + line = line.decode("utf-8") + # use file_lock to ensure file integrity since the log file could be closed by the self.stop() method! + with self.file_lock: + if self.log_file: + self.log_file.write(line) + self.log_file.flush() + + if self.cmd_desc.log_stdout: + assert isinstance(line, str) + if self.msg_prefix and not line.startswith("\r"): + line = f"{self.msg_prefix} {line}" + sys.stdout.write(line) + sys.stdout.flush() + + def poll(self): + """Perform a poll request on the process. + + Returns: None if the process is still running; an exit code (int) if process is not running. + + """ + if not self.process: + raise RuntimeError("there is no process to poll") + return self.process.poll() + + def stop(self) -> int: + """Stop the process. + If the process is still running, kill the process. If a log file is open, close the log file. + + Returns: the exit code of the process. If killed, returns -9. + + """ + rc = self.poll() + if rc is None: + # process is still alive + try: + self.process.kill() + rc = -9 + except: + # ignore kill error + pass + + # close the log file if any + with self.file_lock: + if self.log_file: + self.logger.debug("closed subprocess log file!") + self.log_file.close() + self.log_file = None + return rc + + +def start_process(cmd_desc: CommandDescriptor, fl_ctx: FLContext) -> ProcessManager: + """Convenience function for starting a subprocess. + + Args: + cmd_desc: the CommandDescriptor the describes the command to be executed + fl_ctx: FLContext object + + Returns: a ProcessManager object. + + """ + mgr = ProcessManager(cmd_desc) + mgr.start(fl_ctx) + return mgr diff --git a/nvflare/app_common/tie/py_applet.py b/nvflare/app_common/tie/py_applet.py new file mode 100644 index 0000000000..bd47ce6261 --- /dev/null +++ b/nvflare/app_common/tie/py_applet.py @@ -0,0 +1,240 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. 
All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import multiprocessing +import os +import sys +import threading +import time +from abc import ABC, abstractmethod + +from nvflare.apis.workspace import Workspace +from nvflare.fuel.utils.log_utils import add_log_file_handler, configure_logging +from nvflare.security.logging import secure_format_exception, secure_log_traceback + +from .applet import Applet +from .defs import Constant + + +class PyRunner(ABC): + + """ + A PyApplet must return a light-weight PyRunner object to run the Python code of the external app. + Since the runner could be running in a separate subprocess, the runner object must be pickleable! + """ + + @abstractmethod + def start(self, app_ctx: dict): + """Start the external app's Python code + + Args: + app_ctx: the app's execution context + + Returns: + + """ + pass + + @abstractmethod + def stop(self, timeout: float): + """Stop the external app's python code + + Args: + timeout: how long to wait for the app to stop before killing it + + Returns: None + + """ + pass + + @abstractmethod + def is_stopped(self) -> (bool, int): + """Check whether the app code is stopped + + Returns: a tuple of: whether the app is stopped, and exit code if stopped + + """ + pass + + +class _PyStarter: + """This class is used to start the Python code of the applet. It is used when running the applet in a thread + or in a separate process. + """ + + def __init__(self, runner: PyRunner, in_process: bool, workspace: Workspace, job_id: str): + self.runner = runner + self.in_process = in_process + self.workspace = workspace + self.job_id = job_id + self.error = None + self.started = True + self.stopped = False + self.exit_code = 0 + + def start(self, app_ctx: dict): + """Start the applet and wait for it to finish. + + Args: + app_ctx: the app's execution context + + Returns: None + + """ + try: + if not self.in_process: + # enable logging + run_dir = self.workspace.get_run_dir(self.job_id) + log_file_name = os.path.join(run_dir, "applet_log.txt") + configure_logging(self.workspace) + add_log_file_handler(log_file_name) + self.runner.start(app_ctx) + + # Note: run_func does not return until it runs to completion! + self.stopped = True + except Exception as e: + secure_log_traceback() + self.error = f"Exception starting applet: {secure_format_exception(e)}" + self.started = False + self.exit_code = Constant.EXIT_CODE_CANT_START + self.stopped = True + if not self.in_process: + # this is a separate process + sys.exit(self.exit_code) + + +class PyApplet(Applet, ABC): + def __init__(self, in_process: bool): + """Constructor of PyApplet, which runs the applet's Python code in a separate thread or subprocess. + + Args: + in_process: whether to run the applet code as separate thread within the same process or as a separate + subprocess. 
+ """ + Applet.__init__(self) + self.in_process = in_process + self.starter = None + self.process = None + self.runner = None + + @abstractmethod + def get_runner(self, app_ctx: dict) -> PyRunner: + """Subclass must implement this method to return a PyRunner. + The returned PyRunner must be pickleable since it could be run in a separate subprocess! + + Args: + app_ctx: the app context for the runner + + Returns: a PyRunner object + + """ + pass + + def start(self, app_ctx: dict): + """Start the execution of the applet. + + Args: + app_ctx: the app context + + Returns: + + """ + fl_ctx = app_ctx.get(Constant.APP_CTX_FL_CONTEXT) + engine = fl_ctx.get_engine() + workspace = engine.get_workspace() + job_id = fl_ctx.get_job_id() + runner = self.get_runner(app_ctx) + + if not isinstance(runner, PyRunner): + raise RuntimeError(f"runner must be a PyRunner but got {type(runner)}") + + self.runner = runner + self.starter = _PyStarter(runner, self.in_process, workspace, job_id) + if self.in_process: + self._start_in_thread(self.starter, app_ctx) + else: + self._start_in_process(self.starter, app_ctx) + + def _start_in_thread(self, starter, app_ctx: dict): + """Start the applet in a separate thread.""" + self.logger.info("Starting applet in another thread") + thread = threading.Thread(target=starter.start, args=(app_ctx,), daemon=True, name="applet") + thread.start() + if not self.starter.started: + self.logger.error(f"Cannot start applet: {self.starter.error}") + raise RuntimeError(self.starter.error) + + def _start_in_process(self, starter, app_ctx: dict): + """Start the applet in a separate process.""" + # must remove Constant.APP_CTX_FL_CONTEXT from ctx because it's not pickleable! + app_ctx.pop(Constant.APP_CTX_FL_CONTEXT, None) + self.logger.info("Starting applet in another process") + self.process = multiprocessing.Process(target=starter.start, args=(app_ctx,), daemon=True, name="applet") + self.process.start() + + def stop(self, timeout=0.0) -> int: + """Stop the applet + + Args: + timeout: amount of time to wait for the applet to stop by itself. If the applet does not stop on + its own within this time, we'll forcefully stop it by kill. 
+ + Returns: None + + """ + if not self.runner: + raise RuntimeError("PyRunner is not set") + + if self.in_process: + self.runner.stop(timeout) + return 0 + else: + p = self.process + self.process = None + if p: + assert isinstance(p, multiprocessing.Process) + if p.exitcode is None: + # the process is still running + if timeout > 0: + # wait for the applet to stop by itself + start = time.time() + while time.time() - start < timeout: + if p.exitcode is not None: + # already stopped + self.logger.info(f"applet stopped (rc={p.exitcode}) after {time.time()-start} secs") + return p.exitcode + time.sleep(0.1) + self.logger.info("stopped applet by killing the process") + p.kill() + return -9 + + def is_stopped(self) -> (bool, int): + if not self.runner: + raise RuntimeError("PyRunner is not set") + + if self.in_process: + if self.starter: + if self.starter.stopped: + self.logger.info("starter is stopped!") + return True, self.starter.exit_code + return self.runner.is_stopped() + else: + if self.process: + assert isinstance(self.process, multiprocessing.Process) + ec = self.process.exitcode + if ec is None: + return False, 0 + else: + return True, ec + else: + return True, 0 diff --git a/nvflare/app_opt/flower/__init__.py b/nvflare/app_opt/flower/__init__.py new file mode 100644 index 0000000000..d9155f923f --- /dev/null +++ b/nvflare/app_opt/flower/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/nvflare/app_opt/flower/applet.py b/nvflare/app_opt/flower/applet.py new file mode 100644 index 0000000000..e3b8bcd5e2 --- /dev/null +++ b/nvflare/app_opt/flower/applet.py @@ -0,0 +1,261 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
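The process manager added earlier in this patch (nvflare/app_common/tie/process_mgr.py) is what CLI-based applets such as the Flower applets below build on. A hedged usage sketch follows; the command line, log file name, and message prefix are arbitrary examples, and launch_trainer is a hypothetical helper, not an API of this patch.

    from nvflare.apis.fl_context import FLContext
    from nvflare.app_common.tie.process_mgr import CommandDescriptor, ProcessManager, start_process


    def launch_trainer(fl_ctx: FLContext) -> ProcessManager:
        # describe the subprocess: command line, per-job log file, and a stdout prefix
        cmd_desc = CommandDescriptor(
            cmd="python -m my_pkg.trainer --epochs 1",  # hypothetical command
            log_file_name="trainer_log.txt",
            log_stdout=True,
            stdout_msg_prefix="TRAINER",
        )
        # start_process opens the log file under the job's run dir and spawns the command
        return start_process(cmd_desc, fl_ctx)

The caller can then use poll() on the returned manager to check liveness (None while the subprocess is still running) and stop() to terminate it and close its log file.
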
+import time + +from nvflare.apis.fl_context import FLContext +from nvflare.apis.workspace import Workspace +from nvflare.app_common.tie.applet import Applet +from nvflare.app_common.tie.cli_applet import CLIApplet +from nvflare.app_common.tie.defs import Constant as TieConstant +from nvflare.app_common.tie.process_mgr import CommandDescriptor, ProcessManager, start_process +from nvflare.app_opt.flower.defs import Constant +from nvflare.fuel.f3.drivers.net_utils import get_open_tcp_port +from nvflare.fuel.utils.grpc_utils import create_channel +from nvflare.security.logging import secure_format_exception + + +class FlowerClientApplet(CLIApplet): + def __init__( + self, + client_app: str, + ): + """Constructor of FlowerClientApplet, which extends CLIApplet. + + Args: + client_app: the client app specification of the Flower app + """ + CLIApplet.__init__(self) + self.client_app = client_app + + def get_command(self, ctx: dict) -> CommandDescriptor: + """Implementation of the get_command method required by the super class CLIApplet. + It returns the CLI command for starting Flower's client app, as well as the full path of the log file + for the client app. + + Args: + ctx: the applet run context + + Returns: CLI command for starting client app and name of log file. + + """ + addr = ctx.get(Constant.APP_CTX_SERVER_ADDR) + fl_ctx = ctx.get(Constant.APP_CTX_FL_CONTEXT) + if not isinstance(fl_ctx, FLContext): + self.logger.error(f"expect APP_CTX_FL_CONTEXT to be FLContext but got {type(fl_ctx)}") + raise RuntimeError("invalid FLContext") + + engine = fl_ctx.get_engine() + ws = engine.get_workspace() + if not isinstance(ws, Workspace): + self.logger.error(f"expect workspace to be Workspace but got {type(ws)}") + raise RuntimeError("invalid workspace") + + job_id = fl_ctx.get_job_id() + custom_dir = ws.get_app_custom_dir(job_id) + app_dir = ws.get_app_dir(job_id) + cmd = f"flower-client-app --insecure --grpc-adapter --superlink {addr} --dir {custom_dir} {self.client_app}" + + # use app_dir as the cwd for flower's client app. + # this is necessary for client_api to be used with the flower client app for metrics logging + # client_api expects config info from the "config" folder in the cwd! + self.logger.info(f"starting flower client app: {cmd}") + return CommandDescriptor(cmd=cmd, cwd=app_dir, log_file_name="client_app_log.txt", stdout_msg_prefix="FLWR-CA") + + +class FlowerServerApplet(Applet): + def __init__( + self, + server_app: str, + database: str, + superlink_ready_timeout: float, + server_app_args: list = None, + ): + """Constructor of FlowerServerApplet. 
+ + Args: + server_app: Flower's server app specification + database: database spec to be used by the server app + superlink_ready_timeout: how long to wait for the superlink process to become ready + server_app_args: an optional list that contains additional command args passed to flower server app + """ + Applet.__init__(self) + self._app_process_mgr = None + self._superlink_process_mgr = None + self.server_app = server_app + self.database = database + self.superlink_ready_timeout = superlink_ready_timeout + self.server_app_args = server_app_args + self._start_error = False + + def _start_process(self, name: str, cmd_desc: CommandDescriptor, fl_ctx: FLContext) -> ProcessManager: + self.logger.info(f"starting {name}: {cmd_desc.cmd}") + try: + return start_process(cmd_desc, fl_ctx) + except Exception as ex: + self.logger.error(f"exception starting applet: {secure_format_exception(ex)}") + self._start_error = True + + def start(self, app_ctx: dict): + """Start the applet. + + Flower requires two processes for server application: + superlink: this process is responsible for client communication + server_app: this process performs server side of training. + + We start the superlink first, and wait for it to become ready, then start the server app. + Each process will have its own log file in the job's run dir. The superlink's log file is named + "superlink_log.txt". The server app's log file is named "server_app_log.txt". + + Args: + app_ctx: the run context of the applet. + + Returns: + + """ + # try to start superlink first + driver_port = get_open_tcp_port(resources={}) + if not driver_port: + raise RuntimeError("failed to get a port for Flower driver") + driver_addr = f"127.0.0.1:{driver_port}" + + server_addr = app_ctx.get(Constant.APP_CTX_SERVER_ADDR) + fl_ctx = app_ctx.get(Constant.APP_CTX_FL_CONTEXT) + if not isinstance(fl_ctx, FLContext): + self.logger.error(f"expect APP_CTX_FL_CONTEXT to be FLContext but got {type(fl_ctx)}") + raise RuntimeError("invalid FLContext") + + engine = fl_ctx.get_engine() + ws = engine.get_workspace() + if not isinstance(ws, Workspace): + self.logger.error(f"expect workspace to be Workspace but got {type(ws)}") + raise RuntimeError("invalid workspace") + + custom_dir = ws.get_app_custom_dir(fl_ctx.get_job_id()) + + db_arg = "" + if self.database: + db_arg = f"--database {self.database}" + + superlink_cmd = ( + f"flower-superlink --insecure {db_arg} " + f"--fleet-api-address {server_addr} --fleet-api-type grpc-adapter " + f"--driver-api-address {driver_addr}" + ) + + cmd_desc = CommandDescriptor(cmd=superlink_cmd, log_file_name="superlink_log.txt", stdout_msg_prefix="FLWR-SL") + + self._superlink_process_mgr = self._start_process(name="superlink", cmd_desc=cmd_desc, fl_ctx=fl_ctx) + if not self._superlink_process_mgr: + raise RuntimeError("cannot start superlink process") + + # wait until superlink's port is ready before starting server app + # note: the server app will connect to driver_addr, not server_addr + start_time = time.time() + create_channel( + server_addr=driver_addr, + grpc_options=None, + ready_timeout=self.superlink_ready_timeout, + test_only=True, + ) + self.logger.info(f"superlink is ready for server app in {time.time()-start_time} seconds") + + # start the server app + args_str = "" + if self.server_app_args: + args_str = " ".join(self.server_app_args) + + app_cmd = ( + f"flower-server-app --insecure --superlink {driver_addr} --dir {custom_dir} {args_str} {self.server_app}" + ) + cmd_desc = CommandDescriptor( + cmd=app_cmd, + 
log_file_name="server_app_log.txt", + stdout_msg_prefix="FLWR-SA", + ) + + self._app_process_mgr = self._start_process(name="server_app", cmd_desc=cmd_desc, fl_ctx=fl_ctx) + if not self._app_process_mgr: + # stop the superlink + self._superlink_process_mgr.stop() + self._superlink_process_mgr = None + raise RuntimeError("cannot start server_app process") + + @staticmethod + def _stop_process(p: ProcessManager) -> int: + if not p: + # nothing to stop + return 0 + else: + return p.stop() + + def stop(self, timeout=0.0) -> int: + """Stop the server applet's superlink and server app processes. + + Args: + timeout: how long to wait before forcefully stopping (kill) the process. + + Note: we always stop the process immediately - do not wait for the process to stop itself. + + Returns: + + """ + rc = self._stop_process(self._app_process_mgr) + self._app_process_mgr = None + + self._stop_process(self._superlink_process_mgr) + self._superlink_process_mgr = None + + # return the rc of the server app! + return rc + + @staticmethod + def _is_process_stopped(p: ProcessManager): + if p: + return_code = p.poll() + if return_code is None: + return False, 0 + else: + return True, return_code + else: + return True, 0 + + def is_stopped(self) -> (bool, int): + """Check whether the server applet is already stopped + + Returns: a tuple of: whether the applet is stopped, exit code if stopped. + + Note: if either superlink or server app is stopped, we treat the applet as stopped. + + """ + if self._start_error: + return True, TieConstant.EXIT_CODE_CANT_START + + # check server app + app_stopped, app_rc = self._is_process_stopped(self._app_process_mgr) + if app_stopped: + self._app_process_mgr = None + + superlink_stopped, superlink_rc = self._is_process_stopped(self._superlink_process_mgr) + if superlink_stopped: + self._superlink_process_mgr = None + + if app_stopped or superlink_stopped: + self.stop() + + if app_stopped: + return True, app_rc + elif superlink_stopped: + return True, superlink_rc + else: + return False, 0 diff --git a/nvflare/app_opt/flower/connectors/__init__.py b/nvflare/app_opt/flower/connectors/__init__.py new file mode 100644 index 0000000000..d9155f923f --- /dev/null +++ b/nvflare/app_opt/flower/connectors/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/nvflare/app_opt/flower/connectors/flower_connector.py b/nvflare/app_opt/flower/connectors/flower_connector.py new file mode 100644 index 0000000000..deb31f3fd3 --- /dev/null +++ b/nvflare/app_opt/flower/connectors/flower_connector.py @@ -0,0 +1,144 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from abc import abstractmethod + +from nvflare.apis.fl_context import FLContext +from nvflare.apis.shareable import ReturnCode, Shareable, make_reply +from nvflare.apis.signal import Signal +from nvflare.app_common.tie.connector import Connector +from nvflare.app_opt.flower.defs import Constant +from nvflare.fuel.utils.validation_utils import check_positive_int, check_positive_number + + +class FlowerServerConnector(Connector): + """ + FlowerServerConnector specifies commonly required methods for server connector implementations. + """ + + def __init__(self): + Connector.__init__(self) + self.num_rounds = None + + def configure(self, config: dict, fl_ctx: FLContext): + """Called by Flower Controller to configure the site. + + Args: + config: config data + fl_ctx: FL context + + Returns: None + + """ + num_rounds = config.get(Constant.CONF_KEY_NUM_ROUNDS) + if num_rounds is None: + raise RuntimeError("num_rounds is not configured") + + check_positive_int(Constant.CONF_KEY_NUM_ROUNDS, num_rounds) + self.num_rounds = num_rounds + + @abstractmethod + def send_request_to_flower(self, request: Shareable, fl_ctx: FLContext) -> Shareable: + """Send request to the Flower server. + Subclass must implement this method to send this request to the Flower server. + + Args: + request: the request received from FL client + fl_ctx: the FL context + + Returns: reply from the Flower server converted to Shareable + + """ + pass + + def process_app_request(self, op: str, request: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable: + """This method is called by the FL Server when the request is received from a FL client. + + Args: + op: the op code of the request. + request: the request received from FL client + fl_ctx: FL context + abort_signal: abort signal that could be triggered during the process + + Returns: response from the Flower server converted to Shareable + + """ + stopped, ec = self._is_stopped() + if stopped: + self.log_warning(fl_ctx, f"dropped request '{op}' since connector is already stopped {ec=}") + return make_reply(ReturnCode.SERVICE_UNAVAILABLE) + + reply = self.send_request_to_flower(request, fl_ctx) + self.log_info(fl_ctx, f"received reply for '{op}'") + return reply + + +class FlowerClientConnector(Connector): + """ + FlowerClientConnector defines commonly required methods for client connector implementations. + """ + + def __init__(self, per_msg_timeout: float, tx_timeout: float): + """Constructor of FlowerClientConnector + + Args: + per_msg_timeout: per-msg timeout to be used when sending request to server via ReliableMessage + tx_timeout: tx timeout to be used when sending request to server via ReliableMessage + """ + check_positive_number("per_msg_timeout", per_msg_timeout) + check_positive_number("tx_timeout", tx_timeout) + + Connector.__init__(self) + self.per_msg_timeout = per_msg_timeout + self.tx_timeout = tx_timeout + self.stopped = False + self.num_rounds = None + + def configure(self, config: dict, fl_ctx: FLContext): + """Called by Flower Executor to configure the target. 
+ + Args: + config: config data + fl_ctx: FL context + + Returns: None + + """ + num_rounds = config.get(Constant.CONF_KEY_NUM_ROUNDS) + if num_rounds is None: + raise RuntimeError("num_rounds is not configured") + + check_positive_int(Constant.CONF_KEY_NUM_ROUNDS, num_rounds) + self.num_rounds = num_rounds + + def _send_flower_request(self, request: Shareable) -> Shareable: + """Send Flower request to the FL server via FLARE message. + + Args: + request: shareable that contains flower msg + + Returns: operation result + + """ + op = "request" + reply = self.send_request( + op=op, + target=None, # server + request=request, + per_msg_timeout=self.per_msg_timeout, + tx_timeout=self.tx_timeout, + fl_ctx=None, + ) + if not isinstance(reply, Shareable): + raise RuntimeError(f"invalid reply for op {op}: expect Shareable but got {type(reply)}") + return reply diff --git a/nvflare/app_opt/flower/connectors/grpc_client_connector.py b/nvflare/app_opt/flower/connectors/grpc_client_connector.py new file mode 100644 index 0000000000..4d61dc1d38 --- /dev/null +++ b/nvflare/app_opt/flower/connectors/grpc_client_connector.py @@ -0,0 +1,139 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import flwr.proto.grpcadapter_pb2 as pb2 +from flwr.proto.grpcadapter_pb2_grpc import GrpcAdapterServicer + +from nvflare.apis.fl_context import FLContext +from nvflare.apis.shareable import ReturnCode +from nvflare.app_opt.flower.connectors.flower_connector import FlowerClientConnector +from nvflare.app_opt.flower.defs import Constant +from nvflare.app_opt.flower.grpc_server import GrpcServer +from nvflare.app_opt.flower.utils import msg_container_to_shareable, reply_should_exit, shareable_to_msg_container +from nvflare.fuel.f3.drivers.net_utils import get_open_tcp_port +from nvflare.security.logging import secure_format_exception + + +class GrpcClientConnector(FlowerClientConnector, GrpcAdapterServicer): + def __init__( + self, + int_server_grpc_options=None, + per_msg_timeout=2.0, + tx_timeout=10.0, + client_shutdown_timeout=5.0, + ): + """Constructor of GrpcClientConnector. + GrpcClientConnector is used to connect Flare Client with the Flower Client App. 
+ + Args: + int_server_grpc_options: internal grpc server options + per_msg_timeout: per-message timeout for using ReliableMessage + tx_timeout: transaction timeout for using ReliableMessage + client_shutdown_timeout: max time for shutting down Flare client + """ + FlowerClientConnector.__init__(self, per_msg_timeout, tx_timeout) + self.client_shutdown_timeout = client_shutdown_timeout + self.int_server_grpc_options = int_server_grpc_options + self.internal_grpc_server = None + self.stopped = False + self.internal_server_addr = None + self._training_stopped = False + self._client_name = None + + def initialize(self, fl_ctx: FLContext): + super().initialize(fl_ctx) + self._client_name = fl_ctx.get_identity_name() + + def _start_client(self, server_addr: str, fl_ctx: FLContext): + app_ctx = { + Constant.APP_CTX_CLIENT_NAME: self._client_name, + Constant.APP_CTX_SERVER_ADDR: server_addr, + Constant.APP_CTX_NUM_ROUNDS: self.num_rounds, + } + self.start_applet(app_ctx, fl_ctx) + + def _stop_client(self): + self._training_stopped = True + self.stop_applet(self.client_shutdown_timeout) + + def _is_stopped(self) -> (bool, int): + applet_stopped, ec = self.is_applet_stopped() + if applet_stopped: + return applet_stopped, ec + + if self._training_stopped: + return True, 0 + + return False, 0 + + def start(self, fl_ctx: FLContext): + if not self.num_rounds: + raise RuntimeError("cannot start - num_rounds is not set") + + # dynamically determine address on localhost + port = get_open_tcp_port(resources={}) + if not port: + raise RuntimeError("failed to get a port for Flower server") + self.internal_server_addr = f"127.0.0.1:{port}" + self.logger.info(f"Start internal server at {self.internal_server_addr}") + self.internal_grpc_server = GrpcServer(self.internal_server_addr, 10, self.int_server_grpc_options, self) + self.internal_grpc_server.start(no_blocking=True) + self.logger.info(f"Started internal grpc server at {self.internal_server_addr}") + self._start_client(self.internal_server_addr, fl_ctx) + self.logger.info("Started external Flower grpc client") + + def stop(self, fl_ctx: FLContext): + if self.stopped: + return + + self.stopped = True + self._stop_client() + + if self.internal_grpc_server: + self.logger.info("Stop internal grpc Server") + self.internal_grpc_server.shutdown() + + def _abort(self, reason: str): + # stop the gRPC client (the target) + self.abort_signal.trigger(True) + + # abort the FL client + with self.engine.new_context() as fl_ctx: + self.system_panic(reason, fl_ctx) + + def SendReceive(self, request: pb2.MessageContainer, context): + """Process request received from a Flower client. + + This implements the SendReceive method required by Flower gRPC server (LGS on FLARE Client). + 1. convert the request to a Shareable object. + 2. send the Shareable request to FLARE server. + 3. 
convert received Shareable result to MessageContainer and return to the Flower client + + Args: + request: the request received from the Flower client + context: gRPC context + + Returns: the reply MessageContainer object + + """ + try: + reply = self._send_flower_request(msg_container_to_shareable(request)) + rc = reply.get_return_code() + if rc == ReturnCode.OK: + return shareable_to_msg_container(reply) + else: + # server side already ended + self.logger.warning(f"Flower server has stopped with RC {rc}") + return reply_should_exit() + except Exception as ex: + self._abort(reason=f"_send_flower_request exception: {secure_format_exception(ex)}") diff --git a/nvflare/app_opt/flower/connectors/grpc_server_connector.py b/nvflare/app_opt/flower/connectors/grpc_server_connector.py new file mode 100644 index 0000000000..5a88bc365a --- /dev/null +++ b/nvflare/app_opt/flower/connectors/grpc_server_connector.py @@ -0,0 +1,109 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import flwr.proto.grpcadapter_pb2 as pb2 + +from nvflare.apis.fl_context import FLContext +from nvflare.apis.shareable import ReturnCode, Shareable, make_reply +from nvflare.app_opt.flower.connectors.flower_connector import FlowerServerConnector +from nvflare.app_opt.flower.defs import Constant +from nvflare.app_opt.flower.grpc_client import GrpcClient +from nvflare.app_opt.flower.utils import msg_container_to_shareable, shareable_to_msg_container +from nvflare.fuel.f3.drivers.net_utils import get_open_tcp_port + + +class GrpcServerConnector(FlowerServerConnector): + def __init__( + self, + int_client_grpc_options=None, + flower_server_ready_timeout=Constant.FLOWER_SERVER_READY_TIMEOUT, + ): + FlowerServerConnector.__init__(self) + self.int_client_grpc_options = int_client_grpc_options + self.flower_server_ready_timeout = flower_server_ready_timeout + self.internal_grpc_client = None + self._server_stopped = False + self._exit_code = 0 + + def _start_server(self, addr: str, fl_ctx: FLContext): + app_ctx = { + Constant.APP_CTX_SERVER_ADDR: addr, + Constant.APP_CTX_NUM_ROUNDS: self.num_rounds, + } + self.start_applet(app_ctx, fl_ctx) + + def _stop_server(self): + self._server_stopped = True + self._exit_code = self.stop_applet() + + def _is_stopped(self) -> (bool, int): + runner_stopped, ec = self.is_applet_stopped() + if runner_stopped: + self.logger.info("applet is stopped!") + return runner_stopped, ec + + if self._server_stopped: + self.logger.info("Flower grpc server is stopped!") + return True, self._exit_code + + return False, 0 + + def start(self, fl_ctx: FLContext): + # we dynamically create server address on localhost + port = get_open_tcp_port(resources={}) + if not port: + raise RuntimeError("failed to get a port for Flower grpc server") + + server_addr = f"127.0.0.1:{port}" + self.log_info(fl_ctx, f"starting grpc connector: {server_addr=}") + self._start_server(server_addr, fl_ctx) + + # start internal grpc client + self.internal_grpc_client = 
GrpcClient(server_addr, self.int_client_grpc_options) + self.internal_grpc_client.start(ready_timeout=self.flower_server_ready_timeout) + + def stop(self, fl_ctx: FLContext): + client = self.internal_grpc_client + self.internal_grpc_client = None + if client: + self.log_info(fl_ctx, "Stopping internal grpc client") + client.stop() + self._stop_server() + + def send_request_to_flower(self, request: Shareable, fl_ctx: FLContext) -> Shareable: + """Send the request received from FL client to Flower server. + + This is done by: + 1. convert the request to Flower-defined MessageContainer object + 2. Send the MessageContainer object to Flower server via the internal GRPC client (LGC) + 3. Convert the reply MessageContainer object received from the Flower server to Shareable + 4. Return the reply Shareable object + + Args: + request: the request received from FL client + fl_ctx: FL context + + Returns: response from Flower server converted to Shareable + + """ + stopped, _ = self.is_applet_stopped() + if stopped: + self.log_warning(fl_ctx, "dropped app request since applet is already stopped") + return make_reply(ReturnCode.SERVICE_UNAVAILABLE) + + result = self.internal_grpc_client.send_request(shareable_to_msg_container(request)) + + if isinstance(result, pb2.MessageContainer): + return msg_container_to_shareable(result) + else: + raise RuntimeError(f"bad result from Flower server: expect MessageContainer but got {type(result)}") diff --git a/nvflare/app_opt/flower/controller.py b/nvflare/app_opt/flower/controller.py new file mode 100644 index 0000000000..69498fc794 --- /dev/null +++ b/nvflare/app_opt/flower/controller.py @@ -0,0 +1,103 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from nvflare.apis.fl_context import FLContext +from nvflare.app_common.tie.controller import TieController +from nvflare.app_common.tie.defs import Constant as TieConstant +from nvflare.app_opt.flower.applet import FlowerServerApplet +from nvflare.app_opt.flower.connectors.grpc_server_connector import GrpcServerConnector +from nvflare.fuel.utils.validation_utils import check_object_type, check_positive_number + +from .defs import Constant + + +class FlowerController(TieController): + def __init__( + self, + num_rounds=1, + server_app: str = "server:app", + database: str = "", + server_app_args: list = None, + superlink_ready_timeout: float = 10.0, + configure_task_name=TieConstant.CONFIG_TASK_NAME, + configure_task_timeout=TieConstant.CONFIG_TASK_TIMEOUT, + start_task_name=TieConstant.START_TASK_NAME, + start_task_timeout=TieConstant.START_TASK_TIMEOUT, + job_status_check_interval: float = TieConstant.JOB_STATUS_CHECK_INTERVAL, + max_client_op_interval: float = TieConstant.MAX_CLIENT_OP_INTERVAL, + progress_timeout: float = TieConstant.WORKFLOW_PROGRESS_TIMEOUT, + int_client_grpc_options=None, + ): + """Constructor of FlowerController + + Args: + num_rounds: number of rounds. Not used in this version. 
+ server_app: the server app specification for Flower server app + database: database name + server_app_args: additional server app CLI args + superlink_ready_timeout: how long to wait for the superlink to become ready before starting server app + configure_task_name: name of the config task + configure_task_timeout: max time allowed for config task to complete + start_task_name: name of the start task + start_task_timeout: max time allowed for start task to complete + job_status_check_interval: how often to check job status + max_client_op_interval: max time allowed for missing client requests + progress_timeout: max time allowed for missing overall progress + int_client_grpc_options: internal grpc client options + """ + TieController.__init__( + self, + configure_task_name=configure_task_name, + configure_task_timeout=configure_task_timeout, + start_task_name=start_task_name, + start_task_timeout=start_task_timeout, + job_status_check_interval=job_status_check_interval, + max_client_op_interval=max_client_op_interval, + progress_timeout=progress_timeout, + ) + + check_positive_number("superlink_ready_timeout", superlink_ready_timeout) + + if server_app_args: + check_object_type("server_app_args", server_app_args, list) + + self.num_rounds = num_rounds + self.server_app = server_app + self.database = database + self.server_app_args = server_app_args + self.superlink_ready_timeout = superlink_ready_timeout + self.int_client_grpc_options = int_client_grpc_options + + def get_connector(self, fl_ctx: FLContext): + return GrpcServerConnector( + int_client_grpc_options=self.int_client_grpc_options, + ) + + def get_applet(self, fl_ctx: FLContext): + return FlowerServerApplet( + server_app=self.server_app, + database=self.database, + superlink_ready_timeout=self.superlink_ready_timeout, + server_app_args=self.server_app_args, + ) + + def get_client_config_params(self, fl_ctx: FLContext) -> dict: + return { + Constant.CONF_KEY_NUM_ROUNDS: self.num_rounds, + } + + def get_connector_config_params(self, fl_ctx: FLContext) -> dict: + return { + Constant.CONF_KEY_NUM_ROUNDS: self.num_rounds, + } diff --git a/nvflare/app_opt/flower/defs.py b/nvflare/app_opt/flower/defs.py new file mode 100644 index 0000000000..f9011c8ee8 --- /dev/null +++ b/nvflare/app_opt/flower/defs.py @@ -0,0 +1,52 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from nvflare.app_common.tie.defs import Constant as TieConstant +from nvflare.fuel.f3.drivers.net_utils import MAX_FRAME_SIZE + + +class Constant: + + # task name defaults + CONFIG_TASK_NAME = TieConstant.CONFIG_TASK_NAME + START_TASK_NAME = TieConstant.START_TASK_NAME + + # keys of config parameters + CONF_KEY_NUM_ROUNDS = "num_rounds" + + PARAM_KEY_HEADERS = "flower.headers" + PARAM_KEY_CONTENT = "flower.content" + PARAM_KEY_MSG_NAME = "flower.name" + + # default component config values + CONFIG_TASK_TIMEOUT = TieConstant.CONFIG_TASK_TIMEOUT + START_TASK_TIMEOUT = TieConstant.START_TASK_TIMEOUT + FLOWER_SERVER_READY_TIMEOUT = 10.0 + + TASK_CHECK_INTERVAL = TieConstant.TASK_CHECK_INTERVAL + JOB_STATUS_CHECK_INTERVAL = TieConstant.JOB_STATUS_CHECK_INTERVAL + MAX_CLIENT_OP_INTERVAL = TieConstant.MAX_CLIENT_OP_INTERVAL + WORKFLOW_PROGRESS_TIMEOUT = TieConstant.WORKFLOW_PROGRESS_TIMEOUT + + APP_CTX_SERVER_ADDR = "flower_server_addr" + APP_CTX_PORT = "flower_port" + APP_CTX_CLIENT_NAME = "flower_client_name" + APP_CTX_NUM_ROUNDS = "flower_num_rounds" + APP_CTX_FL_CONTEXT = TieConstant.APP_CTX_FL_CONTEXT + + +GRPC_DEFAULT_OPTIONS = [ + ("grpc.max_send_message_length", MAX_FRAME_SIZE), + ("grpc.max_receive_message_length", MAX_FRAME_SIZE), +] diff --git a/nvflare/app_opt/flower/executor.py b/nvflare/app_opt/flower/executor.py new file mode 100644 index 0000000000..f11e8ee00f --- /dev/null +++ b/nvflare/app_opt/flower/executor.py @@ -0,0 +1,59 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from nvflare.apis.fl_context import FLContext +from nvflare.app_common.tie.executor import TieExecutor +from nvflare.app_opt.flower.applet import FlowerClientApplet +from nvflare.app_opt.flower.connectors.grpc_client_connector import GrpcClientConnector + +from .defs import Constant + + +class FlowerExecutor(TieExecutor): + def __init__( + self, + client_app: str = "client:app", + start_task_name=Constant.START_TASK_NAME, + configure_task_name=Constant.CONFIG_TASK_NAME, + per_msg_timeout=10.0, + tx_timeout=100.0, + client_shutdown_timeout=5.0, + ): + TieExecutor.__init__( + self, + start_task_name=start_task_name, + configure_task_name=configure_task_name, + ) + + self.int_server_grpc_options = None + self.per_msg_timeout = per_msg_timeout + self.tx_timeout = tx_timeout + self.client_shutdown_timeout = client_shutdown_timeout + self.num_rounds = None + self.client_app = client_app + + def get_connector(self, fl_ctx: FLContext): + return GrpcClientConnector( + int_server_grpc_options=self.int_server_grpc_options, + per_msg_timeout=self.per_msg_timeout, + tx_timeout=self.tx_timeout, + ) + + def get_applet(self, fl_ctx: FLContext): + return FlowerClientApplet(self.client_app) + + def configure(self, config: dict, fl_ctx: FLContext): + self.num_rounds = config.get(Constant.CONF_KEY_NUM_ROUNDS) + + def get_connector_config(self, fl_ctx: FLContext) -> dict: + return {Constant.CONF_KEY_NUM_ROUNDS: self.num_rounds} diff --git a/nvflare/app_opt/flower/grpc_client.py b/nvflare/app_opt/flower/grpc_client.py new file mode 100644 index 0000000000..67f14fe72e --- /dev/null +++ b/nvflare/app_opt/flower/grpc_client.py @@ -0,0 +1,100 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import flwr.proto.grpcadapter_pb2 as pb2 +from flwr.proto.grpcadapter_pb2_grpc import GrpcAdapterStub + +from nvflare.app_opt.flower.defs import GRPC_DEFAULT_OPTIONS +from nvflare.fuel.utils.grpc_utils import create_channel +from nvflare.fuel.utils.obj_utils import get_logger + +from .utils import reply_should_exit + + +class GrpcClient: + """This class implements a gRPC Client that is capable of sending Flower requests to a Flower gRPC Server.""" + + def __init__(self, server_addr, grpc_options=None): + """Constructor + + Args: + server_addr: address of the gRPC server to connect to + grpc_options: gRPC options for the gRPC client + """ + if not grpc_options: + grpc_options = GRPC_DEFAULT_OPTIONS + + self.stub = None + self.channel = None + self.server_addr = server_addr + self.grpc_options = grpc_options + self.started = False + self.logger = get_logger(self) + + def start(self, ready_timeout=10): + """Start the gRPC client and wait for the server to be ready. 
+ + Args: + ready_timeout: how long to wait for the server to be ready + + Returns: None + + """ + if self.started: + return + + self.started = True + + self.channel = create_channel( + server_addr=self.server_addr, + grpc_options=self.grpc_options, + ready_timeout=ready_timeout, + test_only=False, + ) + self.stub = GrpcAdapterStub(self.channel) + + def send_request(self, request: pb2.MessageContainer): + """Send Flower request to gRPC server + + Args: + request: grpc request + + Returns: a pb2.MessageContainer object + + """ + self.logger.info(f"sending {len(request.grpc_message_content)} bytes: {request.grpc_message_name=}") + try: + result = self.stub.SendReceive(request) + except Exception as ex: + self.logger.warning(f"exception occurred communicating to Flower server: {ex}") + return reply_should_exit() + + if not isinstance(result, pb2.MessageContainer): + self.logger.error(f"expect reply to be pb2.MessageContainer but got {type(result)}") + return None + return result + + def stop(self): + """Stop the gRPC client + + Returns: None + + """ + ch = self.channel + self.channel = None # set to None in case another thread also tries to close. + if ch: + try: + ch.close() + except: + # ignore errors when closing the channel + pass diff --git a/nvflare/app_opt/flower/grpc_server.py b/nvflare/app_opt/flower/grpc_server.py new file mode 100644 index 0000000000..0eb09a9f7a --- /dev/null +++ b/nvflare/app_opt/flower/grpc_server.py @@ -0,0 +1,82 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
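With the pieces above in place (FlowerController on the server side, FlowerExecutor on the client side, and the gRPC bridge between them), a job can be wired together roughly as follows. This is a minimal sketch, assuming NVFlare's Job API (`FedJob` with its `to_server`/`to_clients` helpers) rather than hand-written JSON configs; the job name, export path, and the default `server:app`/`client:app` module specs are illustrative only. The Flower app code itself must still be placed in the job's custom directory, since both applets launch Flower with `--dir <custom_dir>`.

```
from nvflare.app_opt.flower.controller import FlowerController
from nvflare.app_opt.flower.executor import FlowerExecutor
from nvflare.job_config.fed_job import FedJob

# Hypothetical wiring: one Flower server app workflow, the same executor on all clients.
job = FedJob(name="hello-flwr")
job.to_server(FlowerController(num_rounds=3, server_app="server:app"))
job.to_clients(FlowerExecutor(client_app="client:app"))

# Export the generated job config; the Flower "server:app"/"client:app" modules must be
# shipped in the job's custom dir so the applets can resolve them via --dir <custom_dir>.
job.export_job("/tmp/nvflare/jobs")
```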
+ +import concurrent.futures as futures + +import grpc +from flwr.proto.grpcadapter_pb2_grpc import GrpcAdapterServicer, add_GrpcAdapterServicer_to_server + +from nvflare.app_opt.flower.defs import GRPC_DEFAULT_OPTIONS +from nvflare.fuel.utils.obj_utils import get_logger +from nvflare.fuel.utils.validation_utils import check_object_type, check_positive_int +from nvflare.security.logging import secure_format_exception + + +class GrpcServer: + """This class implements a gRPC Server that is capable of processing Flower requests.""" + + def __init__(self, addr, max_workers: int, grpc_options, servicer): + """Constructor + + Args: + addr: the listening address of the server + max_workers: max number of workers + grpc_options: gRPC options + servicer: the servicer that is capable of processing Flower requests + """ + if not grpc_options: + grpc_options = GRPC_DEFAULT_OPTIONS + + check_object_type("servicer", servicer, GrpcAdapterServicer) + check_positive_int("max_workers", max_workers) + self.grpc_server = grpc.server(futures.ThreadPoolExecutor(max_workers=max_workers), options=grpc_options) + add_GrpcAdapterServicer_to_server(servicer, self.grpc_server) + self.logger = get_logger(self) + + try: + # TBD: will be enhanced to support secure port + self.grpc_server.add_insecure_port(addr) + self.logger.info(f"Flower gRPC Server: added insecure port at {addr}") + except Exception as ex: + self.logger.error(f"cannot listen on {addr}: {secure_format_exception(ex)}") + + def start(self, no_blocking=False): + """Called to start the server + + Args: + no_blocking: whether blocking the current thread and wait for server termination + + Returns: None + + """ + self.logger.info("starting Flower gRPC Server") + self.grpc_server.start() + if no_blocking: + # don't wait for server termination + return + else: + self.grpc_server.wait_for_termination() + self.logger.info("Flower gRPC server terminated") + + def shutdown(self): + """Shut down the gRPC server gracefully. + + Returns: + + """ + self.logger.info("shutting down Flower gRPC server") + server = self.grpc_server + self.grpc_server = None # in case another thread calls shutdown at the same time + if server: + server.stop(grace=0.5) diff --git a/nvflare/app_opt/flower/mock/__init__.py b/nvflare/app_opt/flower/mock/__init__.py new file mode 100644 index 0000000000..d9155f923f --- /dev/null +++ b/nvflare/app_opt/flower/mock/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/nvflare/app_opt/flower/mock/applet.py b/nvflare/app_opt/flower/mock/applet.py new file mode 100644 index 0000000000..c2004035ec --- /dev/null +++ b/nvflare/app_opt/flower/mock/applet.py @@ -0,0 +1,76 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from nvflare.app_common.tie.cli_applet import CLIApplet, CommandDescriptor +from nvflare.app_common.tie.py_applet import PyApplet, PyRunner +from nvflare.app_opt.flower.defs import Constant + +from .flower_client import train + + +class MockClientApplet(CLIApplet): + def __init__(self): + CLIApplet.__init__(self) + + def get_command(self, app_ctx: dict) -> CommandDescriptor: + main_module = "nvflare.app_opt.flower.mock.flower_client" + addr = app_ctx.get(Constant.APP_CTX_SERVER_ADDR) + num_rounds = app_ctx.get(Constant.APP_CTX_NUM_ROUNDS) + client_name = app_ctx.get(Constant.APP_CTX_CLIENT_NAME) + + return CommandDescriptor( + cmd=f"python -m {main_module} -a {addr} -n {num_rounds} -c {client_name}", + log_file_name="flower_client_log.txt", + stdout_msg_prefix="FLWR-CA", + ) + + +class MockServerApplet(CLIApplet): + def __init__(self): + CLIApplet.__init__(self) + + def get_command(self, app_ctx: dict) -> CommandDescriptor: + main_module = "nvflare.app_opt.flower.mock.flower_server" + addr = app_ctx.get(Constant.APP_CTX_SERVER_ADDR) + num_rounds = app_ctx.get(Constant.APP_CTX_NUM_ROUNDS) + + return CommandDescriptor( + cmd=f"python -m {main_module} -a {addr} -n {num_rounds}", + log_file_name="flower_server_log.txt", + stdout_msg_prefix="FLWR-SA", + ) + + +class MockClientPyRunner(PyRunner): + def __init__(self): + self.stopped = False + + def start(self, app_ctx: dict): + addr = app_ctx.get(Constant.APP_CTX_SERVER_ADDR) + client_name = app_ctx.get(Constant.APP_CTX_CLIENT_NAME) + train(server_addr=addr, client_name=client_name) + self.stopped = True + + def stop(self, timeout: float): + pass + + def is_stopped(self) -> (bool, int): + return self.stopped, 0 + + +class MockClientPyApplet(PyApplet): + def __init__(self, in_process=True): + PyApplet.__init__(self, in_process) + + def get_runner(self, app_ctx: dict) -> PyRunner: + return MockClientPyRunner() diff --git a/nvflare/app_opt/flower/mock/controller.py b/nvflare/app_opt/flower/mock/controller.py new file mode 100644 index 0000000000..ea6999dc1d --- /dev/null +++ b/nvflare/app_opt/flower/mock/controller.py @@ -0,0 +1,25 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from nvflare.apis.fl_context import FLContext +from nvflare.app_opt.flower.controller import FlowerController +from nvflare.app_opt.flower.mock.applet import MockServerApplet + + +class MockController(FlowerController): + def __init__(self, num_rounds: int): + FlowerController.__init__(self, num_rounds=num_rounds) + + def get_applet(self, fl_ctx: FLContext): + return MockServerApplet() diff --git a/nvflare/app_opt/flower/mock/echo_servicer.py b/nvflare/app_opt/flower/mock/echo_servicer.py new file mode 100644 index 0000000000..e8a798e6d3 --- /dev/null +++ b/nvflare/app_opt/flower/mock/echo_servicer.py @@ -0,0 +1,49 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import flwr.proto.grpcadapter_pb2 as pb2 +from flwr.proto.grpcadapter_pb2_grpc import GrpcAdapterServicer + +from nvflare.fuel.utils.obj_utils import get_logger + + +class EchoServicer(GrpcAdapterServicer): + def __init__(self, num_rounds): + self.logger = get_logger(self) + self.num_rounds = num_rounds + self.server = None + self.stopped = False + + def set_server(self, s): + self.server = s + + def SendReceive(self, request: pb2.MessageContainer, context): + msg_name = request.grpc_message_name + headers = request.metadata + content = request.grpc_message_content + self.logger.info(f"got {msg_name=}: {headers=} {content=}") + + round_num = int(headers.get("round")) + if round_num >= self.num_rounds: + # stop the server + self.logger.info(f"got round number {round_num}: ask to shutdown server") + self.server.shutdown() + self.stopped = True + + headers["round"] = str(round_num + 1) + return pb2.MessageContainer( + metadata=headers, + grpc_message_name=msg_name, + grpc_message_content=content, + ) diff --git a/nvflare/app_opt/flower/mock/executor.py b/nvflare/app_opt/flower/mock/executor.py new file mode 100644 index 0000000000..87a239d46b --- /dev/null +++ b/nvflare/app_opt/flower/mock/executor.py @@ -0,0 +1,34 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from nvflare.apis.fl_context import FLContext +from nvflare.app_opt.flower.executor import FlowerExecutor +from nvflare.app_opt.flower.mock.applet import MockClientApplet, MockClientPyApplet + + +class MockExecutor(FlowerExecutor): + def __init__(self): + FlowerExecutor.__init__(self) + + def get_applet(self, fl_ctx: FLContext): + return MockClientApplet() + + +class MockPyExecutor(FlowerExecutor): + def __init__(self, in_process=True): + FlowerExecutor.__init__(self) + self.in_process = in_process + + def get_applet(self, fl_ctx: FLContext): + return MockClientPyApplet(self.in_process) diff --git a/nvflare/app_opt/flower/mock/flower_client.py b/nvflare/app_opt/flower/mock/flower_client.py new file mode 100644 index 0000000000..e2f4d7ab19 --- /dev/null +++ b/nvflare/app_opt/flower/mock/flower_client.py @@ -0,0 +1,108 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse +import logging +import os +import sys +import time + +import flwr.proto.grpcadapter_pb2 as pb2 + +from nvflare.app_opt.flower.grpc_client import GrpcClient +from nvflare.fuel.utils.time_utils import time_to_string + + +def log(msg: str): + for i in range(5): + print(f"\r{i}", end=" ") + sys.stdout.flush() + print("\nend") + print(f"{time_to_string(time.time())}: {msg}") + sys.stdout.flush() + + +def train(server_addr, client_name): + log(f"starting client {client_name} to connect to server at {server_addr}") + client = GrpcClient(server_addr=server_addr) + client.start() + + total_time = 0 + total_reqs = 0 + next_round = 0 + while True: + log(f"Test round {next_round}") + data = os.urandom(10) + + headers = { + "target": "server", + "round": str(next_round), + "origin": client_name, + } + req = pb2.MessageContainer( + grpc_message_name="abc", + grpc_message_content=data, + ) + req.metadata.update(headers) + + start = time.time() + result = client.send_request(req) + total_reqs += 1 + total_time += time.time() - start + if not isinstance(result, pb2.MessageContainer): + log(f"expect reply to be pb2.MessageContainer but got {type(result)}") + elif result.grpc_message_name != req.grpc_message_name: + log("ERROR: msg_name does not match request") + elif result.grpc_message_content != data: + log("ERROR: result does not match request") + else: + log("OK: result matches request!") + + result_headers = result.metadata + should_exit = result_headers.get("should-exit") + if should_exit: + log("got should-exit!") + break + + next_round = result_headers.get("round") + time.sleep(1.0) + + time_per_req = total_time / total_reqs + log(f"DONE: {total_reqs=} {total_time=} {time_per_req=}") + + +def main(): + logging.basicConfig() + logging.getLogger().setLevel(logging.INFO) + + parser = argparse.ArgumentParser() + parser.add_argument("--addr", "-a", type=str, help="server address", required=True) + parser.add_argument("--client_name", "-c", type=str, help="client name", required=True) + parser.add_argument("--num_rounds", "-n", type=int, help="number of 
rounds", required=True) + args = parser.parse_args() + + if not args.addr: + raise RuntimeError("missing server address '--addr/-a' in command") + + if not args.num_rounds: + raise RuntimeError("missing num rounds '--num_rounds/-n' in command") + + if args.num_rounds <= 0: + raise RuntimeError("bad num rounds '--num_rounds/-n' in command: must be > 0") + + train(args.addr, args.client_name) + + +if __name__ == "__main__": + main() diff --git a/nvflare/app_opt/flower/mock/flower_server.py b/nvflare/app_opt/flower/mock/flower_server.py new file mode 100644 index 0000000000..f09d6688b9 --- /dev/null +++ b/nvflare/app_opt/flower/mock/flower_server.py @@ -0,0 +1,48 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse +import logging + +from nvflare.app_opt.flower.grpc_server import GrpcServer +from nvflare.app_opt.flower.mock.echo_servicer import EchoServicer + + +def main(): + logging.basicConfig() + logging.getLogger().setLevel(logging.INFO) + + parser = argparse.ArgumentParser() + parser.add_argument("--max_workers", "-w", type=int, help="max number of workers", required=False, default=20) + parser.add_argument("--addr", "-a", type=str, help="server address", required=True) + parser.add_argument("--num_rounds", "-n", type=int, help="number of rounds", required=True) + args = parser.parse_args() + + if not args.addr: + raise RuntimeError("missing server address '--addr/-a' in command") + + print(f"starting server: {args.addr=} {args.max_workers=} {args.num_rounds=}") + servicer = EchoServicer(args.num_rounds) + server = GrpcServer( + args.addr, + max_workers=args.max_workers, + grpc_options=None, + servicer=servicer, + ) + servicer.set_server(server) + server.start() + + +if __name__ == "__main__": + main() diff --git a/nvflare/app_opt/flower/utils.py b/nvflare/app_opt/flower/utils.py new file mode 100644 index 0000000000..6b271d8749 --- /dev/null +++ b/nvflare/app_opt/flower/utils.py @@ -0,0 +1,68 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import flwr.proto.grpcadapter_pb2 as pb2 + +from nvflare.apis.shareable import Shareable + +from .defs import Constant + + +def msg_container_to_shareable(msg: pb2.MessageContainer) -> Shareable: + """Convert Flower-defined MessageContainer object to a Shareable object. + This function is typically used to in two cases: + 1. 
Convert Flower-client generated request to Shareable before sending it to FLARE Server via RM.
+    2. Convert Flower-server generated response to Shareable before sending it back to FLARE client.
+
+    Args:
+        msg: MessageContainer object to be converted
+
+    Returns: a Shareable object
+
+    """
+    s = Shareable()
+    headers = msg.metadata
+    if headers is not None:
+        # must convert msg.metadata to dict; otherwise it is not serializable.
+        headers = dict(msg.metadata)
+    s[Constant.PARAM_KEY_CONTENT] = msg.grpc_message_content
+    s[Constant.PARAM_KEY_HEADERS] = headers
+    s[Constant.PARAM_KEY_MSG_NAME] = msg.grpc_message_name
+    return s
+
+
+def shareable_to_msg_container(s: Shareable) -> pb2.MessageContainer:
+    """Convert Shareable object to Flower-defined MessageContainer
+    This function is typically used in two cases:
+    1. Convert a Shareable object received from FLARE client to MessageContainer before sending it to Flower server.
+    2. Convert a Shareable object received from FLARE server to MessageContainer before sending it to Flower client.
+
+    Args:
+        s: the Shareable object to be converted
+
+    Returns: a MessageContainer object
+
+    """
+    m = pb2.MessageContainer(
+        grpc_message_name=s.get(Constant.PARAM_KEY_MSG_NAME),
+        grpc_message_content=s.get(Constant.PARAM_KEY_CONTENT),
+    )
+    headers = s.get(Constant.PARAM_KEY_HEADERS)
+    if headers:
+        # Note: headers is a dict, but m.metadata is a Google-defined MapContainer, which is a subclass of dict.
+        m.metadata.update(headers)
+    return m
+
+
+def reply_should_exit() -> pb2.MessageContainer:
+    return pb2.MessageContainer(metadata={"should-exit": "true"})
diff --git a/nvflare/fuel/utils/grpc_utils.py b/nvflare/fuel/utils/grpc_utils.py
new file mode 100644
index 0000000000..3adc6b003b
--- /dev/null
+++ b/nvflare/fuel/utils/grpc_utils.py
@@ -0,0 +1,42 @@
+# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import grpc
+
+
+def create_channel(server_addr, grpc_options, ready_timeout: float, test_only: bool):
+    """Create a gRPC channel and wait for the server to be ready
+
+    Args:
+        server_addr: the gRPC server address to connect to
+        grpc_options: gRPC client connection options
+        ready_timeout: how long to wait for the server to be ready
+        test_only: whether this call is only for testing server readiness
+
+    Returns: the gRPC channel created. But if test_only, the channel is closed and None is returned.
+
+    If the server does not become ready within ready_timeout, a RuntimeError exception will be raised.
+ + """ + channel = grpc.insecure_channel(server_addr, options=grpc_options) + + # wait for channel ready + try: + grpc.channel_ready_future(channel).result(timeout=ready_timeout) + except grpc.FutureTimeoutError: + raise RuntimeError(f"cannot connect to server after {ready_timeout} seconds") + + if test_only: + channel.close() + channel = None + return channel diff --git a/nvflare/private/aux_runner.py b/nvflare/private/aux_runner.py index 1b338bd037..be7bc8d436 100644 --- a/nvflare/private/aux_runner.py +++ b/nvflare/private/aux_runner.py @@ -152,7 +152,7 @@ def _wait_for_cell(self): ) start = time.time() - self.logger.info(f"waiting for cell for {self.cell_wait_timeout} seconds") + self.logger.debug(f"waiting for cell for {self.cell_wait_timeout} seconds") while True: cell = self.engine.get_cell() if cell: diff --git a/nvflare/private/fed/client/client_app_runner.py b/nvflare/private/fed/client/client_app_runner.py index e3a84794b8..4ac3d3cb09 100644 --- a/nvflare/private/fed/client/client_app_runner.py +++ b/nvflare/private/fed/client/client_app_runner.py @@ -86,7 +86,11 @@ def create_client_runner(self, app_root, args, config_folder, federated_client, client_config_file_name = os.path.join(app_root, args.client_config) args.set.append(f"secure_train={secure_train}") conf = ClientJsonConfigurator( - config_file_name=client_config_file_name, app_root=app_root, args=args, kv_list=args.set + workspace_obj=workspace, + config_file_name=client_config_file_name, + app_root=app_root, + args=args, + kv_list=args.set, ) if event_handlers: conf.set_component_build_authorizer(authorize_build_component, fl_ctx=fl_ctx, event_handlers=event_handlers) diff --git a/nvflare/private/fed/client/client_json_config.py b/nvflare/private/fed/client/client_json_config.py index 238d1c13e6..c0f3770ad9 100644 --- a/nvflare/private/fed/client/client_json_config.py +++ b/nvflare/private/fed/client/client_json_config.py @@ -17,6 +17,7 @@ from nvflare.apis.executor import Executor from nvflare.apis.fl_component import FLComponent from nvflare.apis.fl_constant import SystemConfigs, SystemVarName +from nvflare.apis.workspace import Workspace from nvflare.fuel.utils.argument_utils import parse_vars from nvflare.fuel.utils.config_service import ConfigService from nvflare.fuel.utils.json_scanner import Node @@ -37,7 +38,9 @@ def __init__(self): class ClientJsonConfigurator(FedJsonConfigurator): - def __init__(self, config_file_name: str, args, app_root: str, kv_list=None, exclude_libs=True): + def __init__( + self, workspace_obj: Workspace, config_file_name: str, args, app_root: str, kv_list=None, exclude_libs=True + ): """To init the ClientJsonConfigurator. 
Args: @@ -67,6 +70,7 @@ def __init__(self, config_file_name: str, args, app_root: str, kv_list=None, exc SystemVarName.WORKSPACE: args.workspace, SystemVarName.ROOT_URL: sp_url, SystemVarName.SECURE_MODE: self.cmd_vars.get("secure_train", True), + SystemVarName.JOB_CUSTOM_DIR: workspace_obj.get_app_custom_dir(args.job_id), } FedJsonConfigurator.__init__( diff --git a/nvflare/private/fed/server/server_app_runner.py b/nvflare/private/fed/server/server_app_runner.py index 5af8ea891a..d078adf31d 100644 --- a/nvflare/private/fed/server/server_app_runner.py +++ b/nvflare/private/fed/server/server_app_runner.py @@ -57,7 +57,11 @@ def start_server_app( server_config_file_name = os.path.join(app_root, args.server_config) conf = ServerJsonConfigurator( - config_file_name=server_config_file_name, app_root=app_root, args=args, kv_list=kv_list + workspace_obj=workspace, + config_file_name=server_config_file_name, + app_root=app_root, + args=args, + kv_list=kv_list, ) if event_handlers: fl_ctx = FLContext() diff --git a/nvflare/private/fed/server/server_json_config.py b/nvflare/private/fed/server/server_json_config.py index f58cb8a2f2..f93ef6805f 100644 --- a/nvflare/private/fed/server/server_json_config.py +++ b/nvflare/private/fed/server/server_json_config.py @@ -18,6 +18,7 @@ from nvflare.apis.fl_constant import SystemConfigs, SystemVarName from nvflare.apis.impl.controller import Controller from nvflare.apis.impl.wf_comm_server import WFCommServer +from nvflare.apis.workspace import Workspace from nvflare.fuel.utils.argument_utils import parse_vars from nvflare.fuel.utils.config_service import ConfigService from nvflare.fuel.utils.json_scanner import Node @@ -45,7 +46,9 @@ def __init__(self, id, controller: Controller): class ServerJsonConfigurator(FedJsonConfigurator): - def __init__(self, config_file_name: str, args, app_root: str, kv_list=None, exclude_libs=True): + def __init__( + self, workspace_obj: Workspace, config_file_name: str, args, app_root: str, kv_list=None, exclude_libs=True + ): """This class parses server config from json file. Args: @@ -70,6 +73,7 @@ def __init__(self, config_file_name: str, args, app_root: str, kv_list=None, exc SystemVarName.SITE_NAME: "server", SystemVarName.WORKSPACE: args.workspace, SystemVarName.SECURE_MODE: self.cmd_vars.get("secure_train", True), + SystemVarName.JOB_CUSTOM_DIR: workspace_obj.get_app_custom_dir(args.job_id), } FedJsonConfigurator.__init__( From a9684807b265b8f03cd12f14793c87e4362e12b6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yuan-Ting=20Hsieh=20=28=E8=AC=9D=E6=B2=85=E5=BB=B7=29?= Date: Tue, 30 Jul 2024 17:04:37 -0700 Subject: [PATCH 07/16] Add MetricsSender docstring (#2745) --- .../metrics_exchange/metrics_sender.py | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/nvflare/app_common/metrics_exchange/metrics_sender.py b/nvflare/app_common/metrics_exchange/metrics_sender.py index 9052104a78..f8a4d5861a 100644 --- a/nvflare/app_common/metrics_exchange/metrics_sender.py +++ b/nvflare/app_common/metrics_exchange/metrics_sender.py @@ -31,16 +31,28 @@ def __init__( read_interval: float = 0.1, heartbeat_interval: float = 5.0, heartbeat_timeout: float = 30.0, - topic: str = "metrics", pipe_channel_name=PipeChannelName.METRIC, ): + """MetricsSender is a special type of AnalyticsSender that uses `Pipe` to communicate. + + Args: + pipe_id (str): Identifier for obtaining the Pipe from NVFlare components. + read_interval (float): Interval for reading from the pipe. 
+ heartbeat_interval (float): Interval for sending heartbeat to the peer. + heartbeat_timeout (float): Timeout for waiting for a heartbeat from the peer. + pipe_channel_name: the channel name for sending task requests. + + Note: + Users can use MetricsSender with `FilePipe`, `CellPipe`, or any other customize + `Pipe` class. + + """ super().__init__() self._pipe_id = pipe_id self._read_interval = read_interval self._heartbeat_interval = heartbeat_interval self._heartbeat_timeout = heartbeat_timeout self._pipe_handler = None - self._topic = topic self._pipe_channel_name = pipe_channel_name def handle_event(self, event_type: str, fl_ctx: FLContext): @@ -64,5 +76,5 @@ def handle_event(self, event_type: str, fl_ctx: FLContext): def add(self, tag: str, value: Any, data_type: AnalyticsDataType, **kwargs): data = create_analytic_dxo(tag=tag, value=value, data_type=data_type, **kwargs) - req = Message.new_request(topic=self._topic, data=data) + req = Message.new_request(topic="_metrics_sender", data=data) self._pipe_handler.send_to_peer(req) From fcf8eeaccb4160739211c9719ab4ced6e0588486 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yuan-Ting=20Hsieh=20=28=E8=AC=9D=E6=B2=85=E5=BB=B7=29?= Date: Thu, 1 Aug 2024 17:40:30 -0700 Subject: [PATCH 08/16] Update MONAI example README (#2724) --- .../spleen_ct_segmentation_local/README.md | 22 ++++++++++++------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/integration/monai/examples/spleen_ct_segmentation_local/README.md b/integration/monai/examples/spleen_ct_segmentation_local/README.md index 42772aa9e9..44b28e2670 100644 --- a/integration/monai/examples/spleen_ct_segmentation_local/README.md +++ b/integration/monai/examples/spleen_ct_segmentation_local/README.md @@ -160,17 +160,17 @@ Experiment tracking for the FLARE-MONAI integration now uses `NVFlareStatsHandle In this example, the `spleen_ct_segmentation_local` job is configured to automatically log metrics to MLflow through the FL server. -- The `config_fed_client.json` contains the `NVFlareStatsHandler`, `MetricsSender`, and `MetricRelay` (with their respective pipes) to send the metrics to the server side as federated events. -- Then in `config_fed_server.json`, the `MLflowReceiver` is configured for the server to write the results to the default MLflow tracking server URI. +- The `config_fed_client.conf` contains the `NVFlareStatsHandler`, `MetricsSender`, and `MetricRelay` (with their respective pipes) to send the metrics to the server side as federated events. +- Then in `config_fed_server.conf`, the `MLflowReceiver` is configured for the server to write the results to the MLflow tracking server URI `http://127.0.0.1:5000`. -With this configuration the MLflow tracking server must be started before running the job: +We need to start MLflow tracking server before running this job: ``` mlflow server ``` > **_NOTE:_** The receiver on the server side can be easily configured to support other experiment tracking formats. - In addition to the `MLflowReceiver`, the `WandBReceiver` and `TBAnalyticsReceiver` can also be used in `config_fed_server.json` for Tensorboard and Weights & Biases experiment tracking streaming to the server. + In addition to the `MLflowReceiver`, the `WandBReceiver` and `TBAnalyticsReceiver` can also be used in `config_fed_server.conf` for Tensorboard and Weights & Biases experiment tracking streaming to the server. Next, we can submit the job. 
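For reference, the `MLflowReceiver` entry in `config_fed_server.conf` mentioned above typically looks roughly like the sketch below; the component id is illustrative, and the exact class path and additional arguments (experiment name, run name, etc.) should be taken from the shipped job config.

```
{
  id = "mlflow_receiver"
  path = "nvflare.app_opt.tracking.mlflow.mlflow_receiver.MLflowReceiver"
  args {
    tracking_uri = "http://127.0.0.1:5000"
  }
}
```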
@@ -219,10 +219,16 @@ nvflare job submit -j jobs/spleen_ct_segementation_he ### 5.4 MLflow experiment tracking results -To view the results, you can access the MLflow dashboard in your browser using the default tracking uri `http://127.0.0.1:5000`. - -> **_NOTE:_** To write the results to the server workspace instead of using the MLflow server, users can remove the `tracking_uri` argument from the `MLflowReceiver` configuration and instead view the results by running `mlflow ui --port 5000` in the directory that contains the `mlruns/` directory in the server workspace. +To view the results, you can access the MLflow dashboard in your browser using the tracking uri `http://127.0.0.1:5000`. Once the training is started, you can see the experiment curves for the local clients in the current run on the MLflow dashboard. -![MLflow dashboard](./mlflow.png) \ No newline at end of file +![MLflow dashboard](./mlflow.png) + + +> **_NOTE:_** If you prefer not to start the MLflow server before federated training, +> you can alternatively choose to write the metrics streaming results to the server's +> job workspace directory. Remove the tracking_uri argument from the MLflowReceiver +> configuration. After the job finishes, download the server job workspace and unzip it. +> You can view the results by running mlflow ui --port 5000 in the directory containing +> the mlruns/ directory within the server job workspace. From 6f500dc81b547102376584eb0de012a4a5f97d20 Mon Sep 17 00:00:00 2001 From: Yan Cheng <58191769+yanchengnv@users.noreply.github.com> Date: Fri, 2 Aug 2024 11:52:44 -0400 Subject: [PATCH 09/16] fix clone to keep original (#2755) --- .../app_common/storages/filesystem_storage.py | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/nvflare/app_common/storages/filesystem_storage.py b/nvflare/app_common/storages/filesystem_storage.py index b1d5547a0f..bfa82654f0 100644 --- a/nvflare/app_common/storages/filesystem_storage.py +++ b/nvflare/app_common/storages/filesystem_storage.py @@ -29,6 +29,20 @@ def _write(path: str, content, mv_file=True): + """Create a file at the specified 'path' with the specified 'content'. + + Args: + path: the path of the file to be created + content: content for the file to be created. It could be either bytes, or path (str) to the source file that + contains the content. + mv_file: whether the destination file should be created simply by moving the source file. This is applicable + only when the 'content' is the path of the source file. If mv_file is False, the destination is created + by copying from the source file, and the source file will remain intact; If mv_file is True, the + destination file is created by "move" the source file, and the original source file will no longer exist. 
+ + Returns: + + """ tmp_path = path + "_" + str(uuid.uuid4()) try: Path(os.path.dirname(path)).mkdir(parents=True, exist_ok=True) @@ -162,7 +176,7 @@ def clone_object(self, from_uri: str, to_uri: str, meta: dict, overwrite_existin from_full_uri = self._object_path(from_uri) from_data_path = os.path.join(from_full_uri, DATA) - _write(data_path, from_data_path) + _write(data_path, from_data_path, mv_file=False) meta_path = os.path.join(full_uri, META) try: From de9ef95f76ada88654a7ced41347ca46f888be83 Mon Sep 17 00:00:00 2001 From: Isaac Yang Date: Fri, 2 Aug 2024 09:53:39 -0700 Subject: [PATCH 10/16] Bump up the version of monai-nvflare package to 0.2.8 (#2749) Also update its nvflare version to ~=2.5.0rc1, monai to >=1.3.1 --- integration/monai/setup.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/integration/monai/setup.py b/integration/monai/setup.py index a31011b3eb..8b41d0a099 100644 --- a/integration/monai/setup.py +++ b/integration/monai/setup.py @@ -24,14 +24,14 @@ release = os.environ.get("MONAI_NVFL_RELEASE") if release == "1": package_name = "monai-nvflare" - version = "0.2.4" + version = "0.2.9" else: package_name = "monai-nvflare-nightly" today = datetime.date.today().timetuple() year = today[0] % 1000 month = today[1] day = today[2] - version = f"0.2.3.{year:02d}{month:02d}{day:02d}" + version = f"0.2.9.{year:02d}{month:02d}{day:02d}" setup( name=package_name, @@ -57,5 +57,5 @@ long_description=long_description, long_description_content_type="text/markdown", python_requires=">=3.8,<3.11", - install_requires=["monai>=1.3.0", "nvflare==2.4.0rc6"], + install_requires=["monai>=1.3.1", "nvflare~=2.5.0rc1"], ) From 236f4a0644a7da44cd0267662d444b0cdf2be969 Mon Sep 17 00:00:00 2001 From: Hao-Wei Pang <45482070+hwpang@users.noreply.github.com> Date: Fri, 2 Aug 2024 13:46:13 -0400 Subject: [PATCH 11/16] Update getting_started.rst (#2737) * Update getting_started.rst * No need to mkdir With mkdir, the copied folder has structure simulator-example/hello-pt/jobs, while without mkdir, the copied folder has structure simulator-example/jobs * Update getting_started.rst * Add hello-pt to the folder structure --------- Co-authored-by: Sean Yang --- docs/getting_started.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/getting_started.rst b/docs/getting_started.rst index cee26e8f9e..171a5e6c8c 100644 --- a/docs/getting_started.rst +++ b/docs/getting_started.rst @@ -304,7 +304,7 @@ we can install these in the Python virtual environment by running: .. code-block:: shell source nvflare-env/bin/activate - python3 -m pip install -r simulator-example/requirements.txt + python3 -m pip install -r simulator-example/hello-pt/requirements.txt If using the Dockerfile above to run in a container, these dependencies have already been installed. From d9ef04159ab238f163d003cd553512f1f8f7a34d Mon Sep 17 00:00:00 2001 From: Zhijin Date: Fri, 2 Aug 2024 20:03:25 +0200 Subject: [PATCH 12/16] Add CIFAR 10 examples for Tensorflow-based FedAvg & FedOpt (#2704) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * add alpha splitting * run experiments * add tensorboard writers; increase model size * fedopt version * add fedprox loss and callback * Update ModerateTFNet to match CIFAR10 torch implementation. * Fix multiprocessing GPU init error. Handle no alpha split case. * Add preprocessing to match torch CIFAR10 result. * Unify executor script for different algos. * Remove unused codes. 
* Add preprocessing steps to make TF results on par with torch examples. * Fix script executor args. * Add script to run all experiments. * Add README. * Fix graphs in README. * Modify TF FedOpt controller. * Update README and FedOpt result. * Remove duplicated flare init. * Fix result graph for centralized vs FedAvg. * Fix README re. alpha value for centralized training. * Improve README. * Add workspace arg. Change min_clients to num_clients. * Add warning on TF GPU vRAM allocation. * Clean up TB summary logs. * Remove FedProx which will be implemented in another PR. * Update notebook & README, re-add missing file. * Update license header. * Re-include missing script. * Remove change in torch example script. * Fix flake8, black and isort format issues. --------- Co-authored-by: Holger Roth Co-authored-by: Chester Chen <512707+chesterxgchen@users.noreply.github.com> Co-authored-by: Yuan-Ting Hsieh (謝沅廷) --- examples/getting_started/tf/README.md | 180 +++++++++++++++ .../tf/figs/fedavg-diff-algos.png | Bin 0 -> 37154 bytes .../tf/figs/fedavg-diff-alphas.png | Bin 0 -> 82496 bytes .../tf/figs/fedavg-vs-centralized.png | Bin 0 -> 31496 bytes .../tf/nvflare_tf_getting_started.ipynb | 14 +- examples/getting_started/tf/run_jobs.sh | 64 ++++++ .../tf/src/cifar10_data_split.py | 125 +++++++++++ .../getting_started/tf/src/cifar10_tf_fl.py | 13 +- .../tf/src/cifar10_tf_fl_alpha_split.py | 207 ++++++++++++++++++ examples/getting_started/tf/src/tf_net.py | 36 ++- .../tf/tf_fl_script_executor_cifar10.py | 140 ++++++++++++ nvflare/app_opt/tf/fedopt_ctl.py | 160 ++++++++++++++ nvflare/job_config/fed_job.py | 4 +- 13 files changed, 932 insertions(+), 11 deletions(-) create mode 100644 examples/getting_started/tf/README.md create mode 100755 examples/getting_started/tf/figs/fedavg-diff-algos.png create mode 100755 examples/getting_started/tf/figs/fedavg-diff-alphas.png create mode 100755 examples/getting_started/tf/figs/fedavg-vs-centralized.png create mode 100755 examples/getting_started/tf/run_jobs.sh create mode 100644 examples/getting_started/tf/src/cifar10_data_split.py create mode 100644 examples/getting_started/tf/src/cifar10_tf_fl_alpha_split.py create mode 100644 examples/getting_started/tf/tf_fl_script_executor_cifar10.py create mode 100644 nvflare/app_opt/tf/fedopt_ctl.py diff --git a/examples/getting_started/tf/README.md b/examples/getting_started/tf/README.md new file mode 100644 index 0000000000..3b168b01b0 --- /dev/null +++ b/examples/getting_started/tf/README.md @@ -0,0 +1,180 @@ +# Simulated Federated Learning with CIFAR10 Using Tensorflow + +This example shows `Tensorflow`-based classic Federated Learning +algorithms, namely FedAvg and FedOpt on CIFAR10 +dataset. This example is analogous to [the example using `Pytorch` +backend](https://github.com/NVIDIA/NVFlare/tree/main/examples/advanced/cifar10/cifar10-sim) +on the same dataset, where same experiments +were conducted and analyzed. You should expect the same +experimental results when comparing this example with the `Pytorch` one. 
+ +In this example, the latest Client APIs were used to implement +client-side training logics (details in file +[`cifar10_tf_fl_alpha_split.py`](src/cifar10_tf_fl_alpha_split.py)), +and the new +[`FedJob`](https://github.com/NVIDIA/NVFlare/blob/main/nvflare/job_config/fed_job.py#L106) +APIs were used to programmatically set up an +`nvflare` job to be exported or ran by simulator (details in file +[`tf_fl_script_executor_cifar10.py`](tf_fl_script_executor_cifar10.py)), +alleviating the need of writing job config files, simplifying +development process. + +Before continuing with the following sections, you can first refer to +the [getting started notebook](nvflare_tf_getting_started.ipynb) +included under this folder, to learn more about the implementation +details, with an example walkthrough of FedAvg using a small +Tensorflow model. + +## 1. Install requirements + +Install required packages +``` +pip install --upgrade pip +pip install -r ./requirements.txt +``` + +> **_NOTE:_** We recommend either using a containerized deployment or virtual environment, +> please refer to [getting started](https://nvflare.readthedocs.io/en/latest/getting_started.html). + + +## 2. Run experiments + +This example uses simulator to run all experiments. The script +[`tf_fl_script_executor_cifar10.py`](tf_fl_script_executor_cifar10.py) +is the main script to be used to launch different experiments with +different arguments (see sections below for details). A script +[`run_jobs.sh`](run_jobs.sh) is also provided to run all experiments +described below at once: +``` +bash ./run_jobs.sh +``` +The CIFAR10 dataset will be downloaded when running any experiment for +the first time. `Tensorboard` summary logs will be generated during +any experiment, and you can use `Tensorboard` to visualize the +training and validation process as the experiment runs. Data split +files, summary logs and results will be saved in a workspace +directory, which defaults to `/tmp` and can be configured by setting +`--workspace` argument of the `tf_fl_script_executor_cifar10.py` +script. + +> [!WARNING] +> If you are using GPU, make sure to set the following +> environment variables before running a training job, to prevent +> `Tensoflow` from allocating full GPU memory all at once: +> `export TF_FORCE_GPU_ALLOW_GROWTH=true && export +> TF_GPU_ALLOCATOR=cuda_malloc_asyncp` + +The set-up of all experiments in this example are kept the same as +[the example using `Pytorch` +backend](https://github.com/NVIDIA/NVFlare/tree/main/examples/advanced/cifar10/cifar10-sim). Refer +to the `Pytorch` example for more details. Similar to the Pytorch +example, we here also use Dirichelet sampling on CIFAR10 data labels +to simulate data heterogeneity among data splits for different client +sites, controlled by an alpha value, ranging from 0 (not including 0) +to 1. A high alpha value indicates less data heterogeneity, i.e., an +alpha value equal to 1.0 would result in homogeneous data distribution +among different splits. + +### 2.1 Centralized training + +To simulate a centralized training baseline, we run FedAvg algorithm +with 1 client for 25 rounds, where each round consists of one single epoch. + +``` +python ./tf_fl_script_executor_cifar10.py \ + --algo centralized \ + --n_clients 1 \ + --num_rounds 25 \ + --batch_size 64 \ + --epochs 1 \ + --alpha 0.0 +``` +Note, here `--alpha 0.0` is a placeholder value used to disable data +splits for centralized training. 
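Before moving on to the heterogeneous runs, the snippet below sketches how a Dirichlet `alpha` turns into skewed per-client label distributions. It is only an illustration under stated assumptions: the function name and the stand-in labels here are made up, and the actual splitting used by the jobs lives in [`cifar10_data_split.py`](src/cifar10_data_split.py).

```python
# Illustrative sketch of Dirichlet label splitting; names are hypothetical and
# this is not the cifar10_data_split.py shipped with this example.
import numpy as np


def dirichlet_split(labels: np.ndarray, n_clients: int, alpha: float, seed: int = 0):
    """Return one index array per client, with per-class proportions drawn from Dir(alpha)."""
    rng = np.random.default_rng(seed)
    client_indices = [[] for _ in range(n_clients)]
    for cls in np.unique(labels):
        idx = np.where(labels == cls)[0]
        rng.shuffle(idx)
        # Fraction of this class assigned to each client; small alpha -> skewed.
        proportions = rng.dirichlet(alpha * np.ones(n_clients))
        cut_points = (np.cumsum(proportions)[:-1] * len(idx)).astype(int)
        for client_id, chunk in enumerate(np.split(idx, cut_points)):
            client_indices[client_id].extend(chunk.tolist())
    return [np.asarray(ci) for ci in client_indices]


# Example: 8 clients on stand-in CIFAR10-like labels; alpha=0.1 is highly skewed.
labels = np.random.randint(0, 10, size=50_000)
splits = dirichlet_split(labels, n_clients=8, alpha=0.1)
print([len(s) for s in splits])
```

With `alpha = 1.0` the per-class proportions become close to uniform, which corresponds to the homogeneous-split case described above.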
+ +### 2.2 FedAvg with different data heterogeneity (alpha values) + +Here we run FedAvg for 50 rounds, each round with 4 local epochs. This +corresponds roughly to the same number of iterations across clients as +in the centralized baseline above (50*4 divided by 8 clients is 25): +``` +for alpha in 1.0 0.5 0.3 0.1; do + + python ./tf_fl_script_executor_cifar10.py \ + --algo fedavg \ + --n_clients 8 \ + --num_rounds 50 \ + --batch_size 64 \ + --epochs 4 \ + --alpha $alpha + +done +``` + +### 2.3 Advanced FL algorithms (FedOpt) + +Next, let's try some different FL algorithms on a more heterogeneous split: + +[FedOpt](https://arxiv.org/abs/2003.00295) uses optimizers on server +side to update the global model from client-side gradients. Here we +use SGD with momentum and cosine learning rate decay: +``` +python ./tf_fl_script_executor_cifar10.py \ + --algo fedopt \ + --n_clients 8 \ + --num_rounds 50 \ + --batch_size 64 \ + --epochs 4 \ + --alpha 0.1 +``` + + +## 3. Results + +Now let's compare experimental results. + +### 3.1 Centralized training vs. FedAvg for homogeneous split +Let's first compare FedAvg with homogeneous data split +(i.e. `alpha=1.0`) and centralized training. As can be seen from the +figure and table below, FedAvg can achieve similar performance to +centralized training under homogeneous data split, i.e., when there is +no difference in data distributions among different clients. + +| Config | Alpha | Val score | +|-----------------|-------|-----------| +| cifar10_central | n.a. | 0.8758 | +| cifar10_fedavg | 1.0 | 0.8839 | + +![Central vs. FedAvg](./figs/fedavg-vs-centralized.png) + +### 3.2 Impact of client data heterogeneity + +Here we compare the impact of data heterogeneity by varying the +`alpha` value, where lower values cause higher heterogeneity. As can +be observed in the table below, performance of the FedAvg decreases +as data heterogeneity becomes higher. + +| Config | Alpha | Val score | +| ----------- | ----------- | ----------- | +| cifar10_fedavg | 1.0 | 0.8838 | +| cifar10_fedavg | 0.5 | 0.8685 | +| cifar10_fedavg | 0.3 | 0.8323 | +| cifar10_fedavg | 0.1 | 0.7903 | + +![Impact of client data +heterogeneity](./figs/fedavg-diff-alphas.png) + +### 3.3 Impact of different FL algorithms + +Lastly, we compare the performance of different FL algorithms, with +`alpha` value fixed to 0.1, i.e., a high client data heterogeneity. We +can observe from the figure below that, FedOpt achieves better +performance, with better convergence rates compared to FedAvg with the +same alpha setting. 
+ +| Config | Alpha | Val score | +| ----------- | ----------- | ----------- | +| cifar10_fedavg | 0.1 | 0.7903 | +| cifar10_fedopt | 0.1 | 0.8145 | + +![Impact of different FL algorithms](./figs/fedavg-diff-algos.png) diff --git a/examples/getting_started/tf/figs/fedavg-diff-algos.png b/examples/getting_started/tf/figs/fedavg-diff-algos.png new file mode 100755 index 0000000000000000000000000000000000000000..ba581858591351d3cc875b96d940e6c1daab8880 GIT binary patch literal 37154 zcmd3P2T+q~_b=+Yd@GCYt|Cfxbyb=my@L&q5+Ed@Nl~gK0wRQ7bX`C}h)NFvN(m&C zNC_o0SA-}SP$2Z8A|-)<)CeJP-`ExfzV*)kKX)!Oj*!Xwv~!+Qey6-QjSY3Ucku1t z;NalaJ$uH4gJU~{gJVnoe|`i$`9*|k1N^te(?sVKM?tdy75L)^=aZn592|uxuH_5c zfxmyceAdE~gF`T!{cj6uc-j&8@Ha0lb1#Uylb6qhD~=pGR~#MPJTJO?*}(n-JaRip z_smJCuMNGQRcVC`np#~soL@S>e8Asw>+e2)oH;DV!}C`4QzyKYjWu@um#Vr(d;CB(aZ){d4HP0ekPj&(&XzF`ou{^dAw(mXDvF{~;Fp@i&g+ zTN|ODe%-glQS8&Nx32#8VfVhM31Z@mf@qLKi zKb~*zF;6iwbN8*|FG9&n#$q)-uj06TLG8-YB~caLN^g3jMn}2U1r^%-Q&oCN8;&_u zTf6iwT+@m*Y`fy?pyKoX_+CHXIOJDb_A%p@e&R=jo)+)b+@bEm>-c zKY!eK27lJFRgr@(!JZTiA8k${FZL;2lbP3IkDejSy6BD;@_lt}LMcl{3L;fK*Q$4v zMdo&AHk#F#s4tCWbYG}@zEDWa9KAn!m%jtk*6%ZzBFN@cfDkXEOoH9Ii?S34Dn?V2 z1&`jk5;D|^kDc&g1t!l?xeAk3gBbmzWl5Tj@L*rPqo1g9wSwPgv7)P~-FE`|OsUQ8 zk|WN0k{K!$_@KKgsJGOW;#$_cL{Ri($BH*TV-%3Lty8vMqPO%+dhHWWJ&LeEWbSIqj)YC!#zRKdCs9 zFWItC=vK2h;xxbLrPj}==e1QEQu(-jlLAMYG)bT>_VzA~_pg4hTbUMKE!#hS;5SO-l73?deI zmU3-baBXVNEd90S(o2+o-H?hpMo1r(Jm9lr&KfJF+AmKyYv!f~Ojgi1HQ%3{6*$AF z3|U!VRQx$Nm#`F{0l)B{>mD3gy11g!6=O(^)Km4Qn_+@kt5Y19KQf`R&%0HCVO4H#d)>XXzxGj^k@a?J8@)u|LhwvQ4Id z`Sz%0x3~u$IB6$e%T##zjxH}>6>B5)c_{uk>}H}yYHEz3bczbMIHb&da6paWb*!I# zlgM1BM{8MhcgD!|CsoE8l|EB$BM*cW!w@~GfiurWstl=WO>tPNXUO~4;m!tl%*nR& z+3P~$lj1_TDce45{gIW{BKFXHZi|TMDI|ZtDdSg00>8UJM}hS@YwvEyYIzkS6>8?` z2Ib*ch<_a+bc#mF-L;=n3O$ywIOstV1f7hx$xhF=O~R#v%5+I)-I(;_&AATx;fScG zb1JT41>`y{#6@kF?%=#&FI$h~`w+`tFrn!kM1#)gINv-jsj7$jQwuyqsrF-~mc8;A z3#dlJ5LGVEfQJX$6R1&*N|aI|ta^GhE04;Ry;MSnQ;*YsJKu%@k^NFy{o*wkzNyj@ zW2Mr9Y0SP+|Dm4)B;vBA$tr1kyFOCZlV>f9f09XwWD0IV27 zjL*0j)hHt5=?M2v>9|A*%KZy+M^V@j*PM-Licr9J>Z0PJ`CCaZ9!>#_54vzKA}ERm ztPLiL>;H9Z5eIIG!?IF+RE{ardoe9XM_hi6*%dT)nv7xfJkd16?DUtQtDIyABIL&1 z-PAr_yx&|FsSlGvEN?e!Pu9Hq^QH!RmC$L^8*4}1a_dKcR2b^-{ZLyit37|c&emkP zKpxizWudaBGpZ1prkWLxjk1Fs;l%v`XVZM#e3$lFLS*}Ys0C$TaKwd`37>A`oz<(< zD`yM@IR9++5=(8XYEv@^uRmGtWs~idn$@BXtZ+BCzHEC*y;7Z!xZXwAg+ZI^A2rtb zf(GkgZ)?nmgf+25)F6XlahCp@nO;NEUVCMZxCKG~SC6ha(RBqYtcNew%D|+yMhWZG znQckjX|@HsZ7(4l5t8X)tJrt>wCttM0&CFCE@TpJt1&7gD3n%|nxW8-V!pUcuQ|cA zf8hwKbmOmsslngF%&aMuEXA7XD4O7uua0L0t4o=$fO%ta=Gv~n`xVwzSg0W!{pRT+ zd_Z0}?&e5}8^2?2!^^q)Ev0jPfKFb)D&s*jNKN{(m&V1!ndoU)L<6uf{Sxm&L%NCJ zhZ8?vAOa!fTHn?CB+(RlJ74x9P44VXeuFTtdk>6#P z$d*y$GJFsnK`vKuDcgdsF&~1}!CufZNiRw;m5>*6RR5QvLCYd=WNS&Tfa@slP(TbhlTG;05=-BHV6-K_i{fHVJN!AKrxslo~#(t`-XEb zm$kG~R~ojUO7_}Wo*B7VCEKDVt15BPx6eoKu#>=xoV4KV3t7X*{rgJJfLtu)cU`GF z^*p2Rc=gFE*(8qXJ>yaL8lyLP>23F+f^@l4XkKUyUt@Yop$^E5^8 z;%`iCdN`nUB^_yJgvqug@tHgSlXV58)6+RJtnxU>%}T;X>>67vRcVY^!as_Yx3?^V zX=hn?`Qy_Kg21({C4=)^j_IfoDL_dg6u3K``fK0UOQG%L6@~UH@=VaJ1wLNaTW!cr z>rU|#BrVQ^E$XIbM+gQ;>6bdQACH9NNT|26FIm3RLI34%D$&jS1Mk8n?2(}|$SdH_ zcGYfe&g~(cl>>2tVWFr0-G` zi^T)t!VbgN^8#q8hA)C?+1%T5%NtLf_4(^x(tPr>z8l7*vR?m z=?uA}jqQAnxLu7_jUQ+@q~kMrw!rW@pgF-!Li z=KtRMW{`>V~srEYu5Ee=rS-wE))x+j2LzT>cI%?(lf z{1q1tKW)@;F^t8^1FQzHlKIv5>-ZN~9rCE*cYk&C(Y0++eGh+g82*3E#@}~}xs#Xr ziG(}+rhn!I#*Umxvdl1;KM7pO?KeJMSN1Sp&H(1~Cic6(Dir+wwYHo2Pen6e^lnFZ z6mZ9ZJG}S6#`p5O4xYV|EamQaY}!(LHZE;-d6Z%lJQG7J?R66u(%Jl;=AJ%gUF{Gy zOcDa;tF1geo!5W4{r8TP!m3eu{sj3>7pwl3Uz>KeXbj|76}8T`&(am~a<;9DCBd^1 
zwD&&w6UrW>m$LV+453qF28)Rg#_g9Es!VPCm-|Y{yaQ$cOBYE%81!B>7dd9lq< zHQ#@-ViaA=YUnOq8E@cyU zY#WmY9}^pgkxIe)OQN&6q%}ET&OKR<-fD{90xHH>#}`DXgCirZ;1@e7T2 z?{Cd=WhqO=A}TJl)}Wx19$D%^6{FIk_XfAP9>rdK^D z=ZeUl-t+AD0iEAd&)uN5mKVk&RPgg%X!i&*2HTB2qr_h!~XHj?~1Ews814N1r!NlC>wMv0U|-=EzhfGs(%tK|~c$Rj-2 zI?CIk-eLT+%S*W&2fg}nx7$|Uv-33(3~l1GKsCH?0lo8VtXoM>i^F0*TjP{h0mg>G`H{OTDDxawV?MV$i zB?KfYxRlVg988`z9AFl9ONOWU+OVFezWeBAi{gV%1n`*o5I}$kn zsluy$QJRe;rQ)SD6w7n|#cH2z(Ab?_3w_fsnEr0>yP{LYSA!8lZ+4}?Tr1rnElI+G z_V#U@9l|DZV@vOIdk1D-5h&r~q!&XY80X(yEH#%}7>tpqk@^6k@*G9sW>zJ4_vip2 zaSAJ!aG!c>;uIS3)G-x4s3XQGZ%wV~n@x0RB(MbZqGsiPYs>NzSQI-{U90i_-Jk*< zR_&BB;C%hbXl!a5bE>?6I69e}JMQg{FQ}Y^Oj(iK-7GkoxD zBYYUWJWDruUYY1jtrANzB7*vC7=*zk9p!l_#%slM{U1$Ex?W3ABi(BAD{&o!F7KQ&l>9A4@ll3z*duF1d>jTDhJJ z@VlqCTIJHxtDa=EhukC#A$iHI%x}IIS8T&FqdX&37x?!Rhx})YSxXU% ztHfi3e5t@A^?@y*!N3=4?~8fH(TfM%ZRZEkqZy+-Ek+@O)lLXcjf++H7kSG<>LP}c znOqg5iLHaj0!6Z|jFfKCx8&Fr2e~${CJ!!Sd^m9La|vJ=vB5R?eVy<^iteS6sq5tB zpocEk|B>yW)UKbE6zXpU{hk^;K3;SfbkocBFFNSF7K-0awyW!w{KYo#{t}Da)JV77 zpdV<3ORmIRmzT5co*hHa9oy_dD2x#!gQ?Frs+>;zrN<@b zCY#z?^snO16+61TcvtOSm9_LrGyvDzF@c*@cJlGA3ib-<*G3WSDWxMG^Y5ptm^b~T zal_$K9qa2ef<7t{r}8a_2<@^lNVgm!JW>?#Smv0xyz8`Y z#i;7D)>g$4!icR(npQ=_GHO)dAmXOqFIQ05-U$GkyVKlTk=VeXl!%d^l^O^FFyFTZ z48<3lXVBDomvFFG%Qo*a`NgR9mEa zpCfq7>c?Ue_GVoSNQKFO)HxfSLxu;c3m!Ms6afPZv){M)V=1Y$4 z?67Nsn_xvuEO-zX8~6gn%Ai*W`WiP)B#u4mIh;5aobiJI;XjcxVvWW(nUPG?pHkM9 z$1$Rc)yh=9)IwAHJw6$R2(i=*H(^Hy6|yv{l?u{-;v1+w)yG0bFkn9BvdC$%jT{$> z!_65;mh~k|p~J7vxoZ?I<@Ro3NgarGW(`u)qH$()symQ@ASyfb^!4j zNf)88be`q<-S?C3EV|M@6Dxx<=9F~GTKcCBc=USZ8{8xa zv<01L=t-jXM1u_*oZ2&Pn8;e6mOUmkuhyDpS97Cbf9YO~9=UrDh7PJdJSjD4J9`qv zi@2+200~HyRESxrc^tW!o!6*!k`HmVRNn-=B~hHYbaO`5`kaj3Bd&tJo{31$Sb}2Z zFNkPNi)z313ANQkPFHCo6L1gaM;;7c=?g8Yh_5ZEK;%F6nzwwahcP2hhrqE}j!9=u z9t9^7nwt8CN?fXZCq!1>NY5qGLG{j%Gw~5uJlg*(ga*i7Au~k|I78H;nL#{@^=T zMw948H=f^IOim`n+-Nx=?C{?_Vw8ttTmfZ&axlpj1D z(2#%BZdB*MbB$vd} zT$$?yUF?!PqH{-FcYdFB9$ER8P$Y=NtH^UcrbM}DM|tpJr-IwJ2#2mW?4nx6-7WA) zD!7@X(IFD*sAd=&XBh)^92S|C*3(wk>6aAJwB~|pT`c&k+L_MPwihMbBHH0_?Aaam z4gUayY~*Rmo4Z`FC{iZ6?M66_EK^*^`42NvT7$9>NJ%eUl_+0Za>@=(9(S++E+-k> zAvpUO(pK7qo5EDQqdlc~W;ExL;$!5290RoMU zKoMu()$LN;1hK%#i^nCC`LcUW6%gnJab1D}oVWfWFK}B9=TXj91ARi^oP2bAPh5AI<{+p1Ey1Hp@Y7xNzqUk06E2;swc z$Q8K7RN{3Tn2q7*Oe@IET3L2PYDMGOxTxPFp4~qsGuMm1xF?%Pe@K2ay6w|O0&l~7 zO5OTfi*e%@Lo_w&%#3LhD=^8y) zf7+xiDlR$92K5^U=2R6{Aq79Q9_>si)cFOEcrl9kk z@rZwJs+B zNkMKneaEw4=KJU1MeH=P()^rkMb?FL)6OT%^Z{l;$H>H`-ts@-N*e9wY8{v ze_(+~l)N(R*2rX;YpDgcivk200gJ5z8_{V#E7xHI%09NE4V=vt{wC-%KD#-h*P5rN z4~fd{9V^|F+5b8SrhPa|k5qDkn^+Ke>h4$IF2ADNrH!%EBau4{^MB$)gkqR7>Yj|` z+_HhdgIU-u17;ZzWt_a?ltsXp(_GKbHi&HRibd5muoGKiev2XD&aon1F5Hd z^%o|~VAtKuyTzH8qPCsIinwap?8y-`_bj9HeXu0fKxp4$Y-oO-9LpgnB59=YWpI5? z#}N=J5HD};$w)g0*kOq<=6OS~gEkmf>+yCecsoLGs04KuUj%b>70482B&PRL&x2zw zA3w78%1~0AQ~fX-Lwe}yliP865bkqCCtqcia}LAyCPgL|Nxk=iW90#%PFtYxQrz-%zv;S(sYyG_qS z0dcwJNH+j@WkHtL&5~A7M%RnR}u#hEchF?X0&7LuOc& zG@U0t|2p7xE-0NBR#%eN-sd{H+X8fiC#T|+=boNFf=fel3pz2g#Qs}MoN!31A~x*| zAZY&qZq|#FNNI6Fb=&RK>0VRQqqWZSXLxh4^(!^48Y+XnZ^zN;;8D+^=bY-LdiwdS zZjE0cmrQFBvoK2YtdGDIj-%X=pK!m)SN9)qdEMhK8?0{VW+~lg7hMokr*9|Io)=9FFM01i-oYmg-@a2PuSt?Nu7Ep zrbqhoi85W5QgN$&jD+k-t2UOp?yd;(Ry+PQ(sbpZvdz7OqT~|oTrQ|vUZrhv`47rKR{RF!61Zb z+l@6sdV|OSR~#(Q5%(K3`4vKz)puteQrO{gsYnxqNQt&X514`e?)JE;azn@#aSx|N z8Ckt6bOK(05zTec`AiFd`6wyu1e=Y>fhs6QlPxhPv*589kJ~x*ruRk+>X>?mF$*C? 
zSf6%Eg4VhXt}f)`!((e0s*xo-TNm-xJ)%eTeBNIy&c_Tve8$7??uvY$r%CeS3^ zn+FZU?Sq}{kuX z9ed<#V)J|cvSnnDn&WaZ_B?DmUutpT^;a@wU2M|c+*Hhnus^s!jmH}Qnor6n;8x4S zJoG8A?l8=M$Z_|3@@n3m&@yw57nVHc zyCU%jrf4eHO|*L#Y9GcTyj>2EaQ< za?y`X5J{y?ZCzfrh-}G@-#ks;?H^kkmc^;EQ@S=VKx8Uf4^56XLaehd@aYVVOm6FM zJu+|ECc9wEr_<}6kZnSwY7pzcwGO_uhL4aQ&dlN6&e>M8+*kgb@e*>G4U9|cIb(}R z(!QY5sBxB2Gac_lhXC0}F_j?_Y8u;u{wvvvE<XL%-DMom=Je)F{6)V5!kpo&*)4lS@vx@?waF zq1X-s%AHAAV)O7LPro>_ToIIpQ?vGIm`R^0iOSR7y&cZXS#r6C29U`+Up(wJto@bY^|bHI zP4>jzaJS`(1Gy_l9-ILE`%`E4)42%MqP2PykGyL=yDZ3ZF5BS|-w4(ZvC^;AgOd*8 z9wvEayh;xW2xR_{dr#p-AIzjLan+9t?(Z)5u-|CEuc5I8gH>;C{|2k41;*xFkao(N zfo{h}v<7rcrh@O&iV8aKFg(kw&wgup9N1VR-(DH<49Uqb|Jj22bV*D{%CchdHo-`% z69m78{>K~m2vPu){K_Tim6^YJG&Ls}NTGzbs57jPBtahsk}^BDR!2Cfgq<0i3&gh^ zNWXqYpatN+FNCW^xRCYg$KrXMp^Eu6Gq-1;UeyY8+mI*33Hs4rzj(H5Dfa=Z?*|eP zA-8E&J0Lu`rCuSUp&SD;xBh3G{;d|B^`K@r-hRXt*iT}79y9{SwnKwbMj9dcNh(xC z+Rn${7>ggx{|{kO*zL)|+FBorENYbEfRepH{!R4W0_sDBk1OBH-1_#0W4O_i6=R*( z=(vRdqDyxeyz^y(K+z!NC58!zTX(4INLuAC#Yk4r@L%g0AxqgJ9t&hXw%?kOGsEC6 zvtI8C>B{&gSSc~G{YGP}m*)x)RrdcIIHVm3c|Quu>AiUb{vf03`t5>kE50!c`n!op z#Ahx6q8aP}sE!y!-p0!GL66YU90L}RfoY-s{?+xw%P92)^FxY1$7oGq>)+Q9=Y`$! z6b9ycv<%h@X9Q%=B#7}WE2w-LNY=HPH_W4m>wH*CBUPx`C3*xBgw zG8St5hx|x2JVP#xgv%%eeV#nk>_j2kY*U0_8}P~{7U2eUv*tVSH*z5xD{h0bhS$gz6`72V*&x6 zq@Jl8E1%+D5~U#il8|;0lg^YvHPz-!%bdJc@&X~G&D#%m{cQcOjFeq`@`jQ0py6Aa zkTg9VxP8%Y@mFA-SP57m-;mn$lik%p7+Rp!;KytxlAXoXi>o<3M{#;k!K{#ZUH#%A zP|;#z#EjSE-X0H0$U&G0R`y-+IolV7z6_544h|P@69#W)F<+_|&W3I6wv0~5B=5;9= zC>NWn7t?YIL!fl=dEf##lRTw6^Pu-}2V8k`*dvjFkxqXz(grHMjmV)J2`$?Onupwl z?a85&8L<2C;$YW-v`WDdC(A*zZ;jGT@a?uzj+Z^z_*!2W{%jDr!LfTM>|WZ@O!^?0RXFGjZU%W+k2oVR#RcT^T^61Cz__COjirx+>X2E;%j?s@7LN9 zry8&@;j+#~xomsYVCWjACtkQFuczeFX#yjv%=kX~a)|A*Bi|0-(fDmH)oUV0(#ST@ zcGm`c(OfE_GGIGxuy(;+)R?=x2EX^v*9xB^*kNE@wZ{B?UC!Ymd=XBW)^Q?vW)Y0t zEdlN)7$X-kvOd%MFFu|QBABmNkZaYP8`o{`J1WOG(T(|`e96;@#0HNhxb%AJeC-d1 zw{3L*E=O)z-xUK$X6kY(1p>~5d$O!`Y-Q|^L3wbNsxYP=h=go}7yj#5OHx<2lsqMg6VO!L8J^KY3%Me& z3+TN$g?r(b8Z~d6Huc@I?gDNA_@Z=eu8!Sc9i3Dnih>EIv>t99L7C=EaceGl=exy^ z4Zfx)+gx%6ibB@oATh)bbXhFaaUdl_foOSxH5U@_T(vEMflP`YKloY}e^Q@o2rGS) z>lf0Y1$L<0xEJwYpk%4h$$ZkgSo)^L@2K^c;A?v~rj$b$)rAKw6R;s}RiT(ebig}| zG<7R*J6DX*_7fu-*294lR*s41S^78Xkp4J!!t1hVT#n35oe48P|Bjg`G8#jlnOiaI z@3F$a2GY!Zm=);EyyN@UAFv;g^uInk?}+1;yL+)QSDvhNd4&LaSYxyuu_ZdEwd^;f6x1=laG4XaC$X7ds4otLcQ1>H3+wz0VNjT$EzP_L9Q7lEkc zN|fX=FVlU3>le3HAAd<_C7qHyapd(4mY5ck&VUYMJM<_$*r7g#3pVumj^b~m=SR3` z4Z?{EwY+NZ_*Z6Go>GP=s15YKa$t&TB>^=}RR5ZPKO(nl{Z5G|TmMkETYWZVp+2RB z>yV-FWrOd-+gOV|>=DkGiu~)`i#As!=6+=oo&`_rf`uTJ6$k!6Pvse}6PdY_0!o+f zuk-iqHFytj1wC1ziz~Kt-3$3`;kpX{Y!c5%*tguak!?x}lsK_>2{<-TTgEKH7F#iW zq(9Ihbov`hnVm(vA<#VLzoqDf&IIbZ!Ob18G6EQ4?HoG&jREyB{Iter(n;XD9jazm zs|Rsy6xPBiOt(ahW;W#=w^?^fKjt5P(Gz3vEk(C=CVVuWwjZ!|44q!bZQmd*I=o!B zOU|aW99odl4D7yj-GHvL^xHUjsh9Y4NB;&}+|d6amGd3|?1~6z5-^TY(@D&a`+ZEu zcCP2k^k`K5U!UZS2X0Bp#yH*NU8N&lxJc{%gHe06c5h=i%ipjIDS* z)B$W+t{@7{=WIH*ciq(A1kW1?952y1vwrO>3nwuO66(sT19JP#Ovi-&^}#|8i^|tI zn6o@|S$W0g@{(OL?4%!Te_pMtfW?L2^+wb;O1XgRw$iKEPnX)+s)sX)ontiN+XgSN zxUjPBaZ1XD3EIycw_JHrR5v<&6(R;E{0NWDX$u5SZ{#k|2=BTrY@`e-Bu)@DUbO?M zPntsLjVR$lHs<{#Sl`2FQX} z?YJS-^JvIHvveA<-X=ZlZr;P=|Ee~6w?8f4rIeKp<-7}kmle~z#obP!q5mp7x+P9D z_&vfZGt_rqeGap)9q@#Dw;c1|P_uG`uqC&1ypQ*3M2?CXyJZVeFY`DLp!wT zrjp_YNB&JUmGB-Y1~$G^ga?s9yTQtf>dE&uO|)7!?KkIjmiGX4OLlZfXXEJxZjakQ zVi%wm52*?@h?953M18AO*zUkuyOg^1?L03Y9?^c1^PY(Z-(lPiSci6sri*FfrF9b{ zNB`PwcyXO!eq;O7huaLXa@DS6$qsGn6L(-hg6@Ai>=@Uir6;?MptXx+cid=dKmlv~^ zWu3II$|!)GbJ#77FB#{e(D^d;)SQbH>PsDA@=o55AV%qwQmqPo!9MvQAqb#%6cLc0 z13ft^Elw=1@X+e7W1*iW0?Y{@Q<*1&$(Wk5HZAlYic)Oer;~n>g2u*T+pftF&h?~G 
zTPgZ{;wa4`XL?u$Uyp<4*a?#>Z=y2-yl#u$%vjK8UWHgP<#x(s5ljG-O+{c9_1`9I z&rbD+Yy0)!4Jf{rvM?>$Lz(#p;X*hdiC)N{oL%dBhd)dvV6vvrpTSu=*Vy9WKgCi7 zYOX7)8Ro(ROV@xc?Yb9ehMXdY8}1rO(*P1x=1zR8+1S?c$IN3Ojl9iBQ@zB1GTg=U zNW_uG9AMPI>%zDbFP9Al2LM=wqhWvbWc}8;QZuuVicxu~^2zd4h)+#v;p;b@^UK42 zM->#RhTihJ`nPcz26)b#}4a+4^B0~HrVeY zf@5r0k$O21+yC4)nXw9#rC$o8240#>i{9P!e2_DF5tXn2L}g&fBZ%FreSmOm`%y4O zQxEDo&{6|FYUB{dANK4UmwpW%6FcG%z#-=P7(d%1g^Q*MbKZ}r6*Ifu5C_0>bCZX( z*kxD9Kxk@14aifcCoMN+bSh^7fUj-t5QBzCCVb6(B_`IP*FcnK8|Th1H9`QNyyVNa zoP`2`Au9@!olaXy5_FUeu^AHriWQv!R}6Fqw@qg_tff9I>!_KqKZ_ZF$hdMF`>f&d zz#4e=A6*Q%@1>43r4JCF;I90exPveRM$)?Y4^=~~SMkcO)S__m68FQ0j14Qi{xPCy zz877gaoF)@^Hf0_yT#W{IKkXfz`#GJSd+kr`)Q;5^);B1QYpDpC5vE0P!74&fhR9p zY*Gy5Jl>K2t&nW5(A}j%Sjx=m_Hfc?o~~Ynl`TZHcjcvhcU_H&;*iD|lo$dP%(qQm zA|2nPZl-@j*J$|7eTYowbwd|=8{kQ|ikrZ2rKAfR7qGEih<|_#%MW{oc-HAq1~h5> zs6cCPwN0k}+RAh6a8tsGnhC2Ate}^1HXL!sc(n+xw87qwqc09y7Sx zy$hQx{A;ZktvyT?RV;96{OGVAJ=(8{i|)~+t$;nh?|BS+QeikTq`lj%W3XYfQpk;v zs-4*YXSJ<#3wY@PN%T%E0MNhEtcBa8=yHRkqVUM;hJv-Oj^4g{S=53IDJl;(Yx5)f z7OWDyq`;Y?*QrejHUZGh+k*H#-%~o;zM`!@)(*&^`GqJkRZ~~~CMTBbCY%QN<7-@M zCa_wpF`3)!0`85R9)L-yj6vP{JT|EZ{rWng6({aPoR1B50h%39@G2ca84Goro;70< zD4qSf>~y4h72<15d*e^IGMs`C-X(Qkqq(Td>5(8*k%lmlt_Nkf6C!Wgg8#61Z{| zyV#Sd8NRUyWGm-2!aFkB+}n5RJ|tnezFStWeR1IX9~>+#5;ZBaVgZUwC5V#p&t%7VVoO*Xp$niTY!1HzB@cZFbVSk0cv+YE*q&RyJu z&xpmy@s6)OZIYc71R$0bT@B$0Y(F=?_VN#_x=;T7tnH-}usy;w&AgD1Vwc*+cFNIX z`YsyBA^#fUxC5V-vnHe{FnfYso~ilYnHLt)}MlG8Cq4iB7uS zeY%?-6z5KXZ!BwxQW^OOs9>R{!c+(w*7rEdz41j}2Nc;$$6=GBmzpqV0U6LzvyzE9 z%T9?Y9x^<*DRZw2t=bpHV%^ndkf1y0$)m7jhH&A;yNUNPH@;`zB9Knu!U7_o)oYA1 zQ-cj5nd~4W4yZ4^BN$>*`0v%{i3>7Fpc<pGJqz#I3v#qEQH9^h=(`Au-3o&TBe>a1qk2%tSNcCJq z`@BvbLV=nV_TSEUosW!i5drQ+oTAVo5u=t8|6iW{wvK$#kp{T+fTal1VL28nugcMn z%GYmv_i?K0yA&uTV8x6w*!B-`5-3}~6w%HOEQ@{Lre9lP;I%pQgGfHccOBAea}4e* z&xPhPDu+$f3dd@By-Z7kYC}3-h9@L63bo# z9YnNfY}>(Flg!iwZF1*mI~{3nV4up*!}XJh%kLa1t_W{=E!+x_Hj%clIdRt7TjjCX z9z_6h%}?UfqSfOG^g z?X;kU?vW}zAUd{`9Gy`F^9pW?KNe?5_t@%)L+MIE2uD8b6ekZw_S^Mry4Nppf56YQ zz-RzX$|x&9l5GIL2J%!T*-q^}4`smFv~O){2MRm8funTBNg_0{VkVA2 z=x^8l5cFomYySH>=NF_f-VvuTXw=kZm4t46XdWZ4<4Ds1IXG(n67f9uEk_(&K4#TIF zhy5R4kZ5XWH^6!;gCs=@lHE9#^5ewBAnvuS1=>V$OT%B4KvyDQSzOdUSw*v5d6m_o z5WEuo%?L4SD(EyMbET}-BIN1&$ZOw}aDVOX4#$rF6rR*WUOv2#jXOn9m#BAI(Kv#X z4RP(jAA}_@B5HFy-UhR*4cN@Rwqp0=N zju=o6f27a=xS-I?#@?2d$N>!#Ue7yXo0%eCwf6?zY4()!xM1r~3QjygMk^TFdiiaeRY&QBo=>eZ|91JXz{FfVtaN^CH zkp!^pCc1A!1!j^&q`f~Bz|?khZ9lyU>#P+viGRtk(-_Rg74hd?+kf9=szc|7JT-zbdDj#ac`Pd=fYHstZN*e{S*5%mET3|{$wbg-c6=b9(KY*hFuyk=* z9oHs%{yMYkLysNQq8Ox&0A}ZgQxFQ0h=R@)Ff4NbD8|km4zIl|IJ3!6Ymd*%8nbYY zUp{P*E#5J179I_^dS4rF!WwIREusW_?s>C>w%20DuuPmW2(Kfwv30Q)=0!<1wl4sf zW?Dg%SWl*H@A73#!Teo!`a>|P$0!>S55E(zxg<)E_g#;W6GtzMKdrtAL z3@j8I(dOmae&H}KM~!la3B#lk0`0v8=13dcAZvX+`%*wYzcvEg@*^u1(^pE_k!ut8 zbH(DS0@pXCLvxDiH+0B|2hI@+1a9a69S~XYw%|ByPrIh#&L8bJw%gYEy|+v-7(~Vg zd%W|~Jywk&@}VdXR^>g^O(|^CG1g~+03GZsXXl;>>M%OA0pPpXf?xj0$!rI}Xz%to zF7O>p8SIE#2t~WK;t7!6p<8-DzmKyN4FFh^1~P@#L;D}~(3jLDx(p1Fis4KYv*wq- zWbS_jk_ivN_PE2{1a58O&aQRLbYihO5~|C$31G9d61jk6FjfsHLY>4IsI#M=O8W#i zUViT6heiTlHTZdOaQ+Q{OLCzCPLI-8UYtcH|veCKaj%oiAz7zD|I>r+Y;+~hF&FOb{ob3Zr!%1^@mHc#_O?cI|m8ux%9P+VzI z>_x)4QnbVs6_dx^Rp6%y7pS_Fr9&O_$86+lCa)7rNZ$c8kmf^ztdT8LMaAJ!z?Q@U zc`do3#=Z5l<3!LNJW<#>TgF=eQSoN`cL=XH9MFmjVyA|m974GREMtkaR7J+Vfn|wt zrtnE^vdguu%Js(fZeuEryh2#4jWFvtE2z22LmL2)_EwH!#7!>g1{lTWozGN#CO1l+ z->~!9*GSi9Eu+@*<^Q<<(?&w`f2lQF{<*UCd0^k+%52_8Zmo3uA2+xodVXjO^qt+b zHb_F)tnt=nN$3CnbmQAF3ucpcf$N~6%@6*+l^(L0c71qh4JQ0Q>oE9zoB`xlsrf$q 
z+C1E-Yu881a*aFo?_J#1=9r*qGV>ICFp?0=T57nj_~Uo_@4UrR*u=&V~~Vn=S&5W?cnMranrn6~hsa5$8rT@D=_+76#pz?8_8*n!BAbc@BVQh`PV_;?8m z6G4wBzfxMWfZXx(7pkX%rgB&2ZC43P?55yA1Mncg5ZMo40*dx0Gny)qOY2lj77uXdYb79jpXod*^m{ZsUjh%g$GO1 zsF129pn-O;!+cUpAjTi)a7wYl?;Lt|G%c@Yd05H}*c#)OOT%L&T22PK*{DBF3Y(_W^E?&^-v$TI{QIgb=++%G796LGqs&fx_wjo-aWTJ>ix{fe>hXqnc8 ztK~LXDK%>2oOdJ<`}Rxb1zY?yRroAjsu}D%UY!{;pkIYQVpN;wmBgSpfRlEI8dfE= z&H|t5)aLyKd}Q)EAEK%V987jf+`Af;*quAGI$e8Lmuy#aMM3j?sBxf^^F3N2NeIzw z>o*(+dbYprF>`Qrh1PM-oo;P!&0AGW#TkGPe> zJyU|fW<(P{Y$m!Czu4+c%}P*kbcw|R=zLDII-_skGTAI@cf$1!QyADEDDQy|E_IZl z<;@B4*yijHn-~O0y))@ZzXtudoS^XgN1RT#t}m8d>Moj?FV;$0O0$z)1=e45ho6&c z6qD?Ro2+IG3^+EnKf`Q~489*x!;z_JCB1)cyMc1OV|7}qluO{vQ~^40-d%6?W&w8V z?wL9UtWszHn0m(DqXVh{IUthauY=_MN=!34ybT7we1gpBJF>F+Zr7@@bpV|J+Mn;2 zIbO3w!J9hY^U!v0TUnjfT+wkiPj0B7;c{_Jl9hSH;rMe!Lt_9b_!yD4%XY;_@=DVw z$dFmZ5ZR2n`vxgLi$MrUwf>q4v%-hsaF%o*Nm-sDpI+B8FosWe6@uGdTl1gHy25uo zhyPq?N($CRgt*i_einzVA{~k8Xx0m9e^-OHVsD1>7OZhc>+Ro6xXf_TZ@_*IvkdRl z_yAps0Fu-{RKkz@*;X4L&xXR`+|amemn5+stJU;KN!(=BY_roKwn83qxUn5*np*6h zB<2njTjg>|mfu%ACI(?lbhqt!9gB@Q932yDBiXN)A|rpuTRWLQEc9Ud=01pII7K&eS(P#(8Ar_F$Osz`~U zKdz5J8VZ{Ka{-O*kyr(mlUqTMsO0mjJE9xD&+d?2iGJ6eG*)mGU;%(zL_RX_3WiL1)WlngsQD+K}@Si`b zf&Ae3`;yvQfH9T;!6b&SZh^+#lOFugOWQ&pHrnP zU~)9Z^v){#KGSB?Rs-3uJa8oa4Tv20vA2Y)FHs%lUG`VTB9mrx@oigeDnw;#VnNYX zrT||=4ydZ^D4O{XSOVY;iR)~MY60atBeaxDzHPscEqei>Y!;(Gbx2ogwgzoL&-1kv z)YHly(J>5=4e=wX4uD+;sw%pj;r>A{G;j7G9^i~AU{>4sT)BTBQ0nt$4v?GoUcz$f zULeKHg@Kny49uG=i9ujE+_u1BaoZ$8=RKd_aq+Oku0CSoWDYTt)CP2e9T55QRU^bLZ^E5bwXtOa8rUA-nU-7Xej#6&sq_3k|{&^`+-*pt7301v3gR6LDDc$$DR#(rth>D z00nG2@#o4U1CqbY&tVW3tc8;Va_p*oAt7JhJ^~r4KRE9QgxW$uEL(-_gRsww;4p|1 zF_9vUr91I*gEMI}jd9}L&YxdYQuX$*R0=o~QxDI3K(woGq?HdvHw^^u`!ei87!ZeY zrrkZPk3$qxgav)>+HHQ%W~o(EE%R(pchidgR8hy-Ab<=O)_{hXkp1*Zl}B&(XyJBB z&Y!xo)QUM<{r0ZDe?9F&cy&5)d8R2@9?E_pg7efrgyWE4y%t^p0btvybbZf1{vtNNu{=~qp3*j&w zp^6ewQ=~-}ZXt>2+FLr}KB*D@&vR5D1v1Ap-%@_4D59-APu`fI4YvxWBW%wd1G?$^ z_?3P+z5nsMUf7Ms_bLIUbGe?)%Fbcj{0a^v_SRhwxRNlg5|2fy55>eHiw|A$RVf%cQmzG;c6E#|{ z)Nw8{Ygd^xAd|3h=Qb-~vU28RWo{ppeTBW3Ju2oq*f1cwUHb^hvqil#aJ6=!2NBSj z0vJbRB)d(yr-q;=lC30%*hGA`lT&;C=^>OHK5K4nSSqAK(P~sUV4`q;*_OvZsA6>% zHv5KZR=Wg4*7KM-kc_Ca`bUFOhr_aa@TY^cX~5evs$Cj?mGv-Eva~hEGXR^)WS4n?B>|7u)&#QX@2_m8k|;$oy2LtNb6P z#n`4!)&lP;L6-GbuXp(v_3WQc$8;@BMU-Vg7F5JE3BHF%Xdh}QsK zfui8G0=zopSu_{0xcle84q4kz#&Q*2sRwj>Z(vKOE>I;>?HY^yU@Q!BdT4qmE2LGk z7J75pEt<)$(0he;+Z|d`)yarAfYnU5E(7?^$#a$ct{6XVY9@hWXgW0R$sWl2Q~(kS-et(xnt> zLWGFfB#@YJS1di}-23CbamRRLy!?ag>}IaH*P7oq=bE3Ahjm8Dil}e3pCjJnrDe~O zTE`b&ZYIkX*gi0BeG^|}=zQWRuDDhXssc5;m71q5CS9M@T$V`zC|Yjb5Ri|%!e8#Q z;@Z?#u$<)jinddt?DjlyRG}hC&(@?oFQ2>M9fmDCI~HIjKIgn2ghj=M2t^HA=LH8B z+9jF9&sOntAJR0+h-cdG?}6M1FAFSn6JOJv5bxG5S5)y2{&k<# zkcNb8sV-NTQ8M?_PSN?EBuj5cWXuSr55%iOkKE%_R+O&pGY@$x({P$fUH7;k{n_c{ zx_`K?4Scw6_2yORd$OLy9iNS7pZmE1@tozv4~AF!1SLwOx*b)=hH%09GMw|%j zd37R_O$>TZX|FzU*OnsS9yD0&4>720M&Ei1rK3(x>*^(RbN6UFufmOTt{4@AN2nup z84!DkuO^>#au1=IwihFfD_)dxZngh$qQG8z{rD5@gd;IN(a5js`ZOgYC9LTs31cay z#q&3;X~~^1#=63R+b7mS8Xex0pKv!g`WqWJY=yBl?Y-d68p@qTMxl)U3-Spws2wd3 zq7ZZTO%CFjv}NY%bpZJvBOgpRpIZ9$mcsThJSDx~yAN+p2ZB0=S(-A-UFH1VF*BAi z+pgPoT^B!>+&9niVSD)&!0!MXlnkK9rNb`6W=< z$jMie=Mo;sV`(4TdWR>8E#T>f(p2-KjVxMs?LUiQj!n<$t8p%$-+6|l7?@SEQQc(j z{l+DWewzp^5h#%C`DWv*Z#IbMAx+b~U)wqtQYj~FTiv>CLN+{WcTt1)q&}!%>V=c% z^t`FJ-Wx@p3i(Yn{Q2*itg#}NjYw7BR)+{Sh&9W zKF~Np=|<98RN**sNKDGTtD$R)Ru z9C9sn5P*+*`)S_1S@X=jLiQtU#k^|&sP=Cts~MJ;=jDzGOfn}cp7wDqcH|=}sZa$L ztyEn7om@!v2oE!tJHGwLpCH^a1x<~0fJoTEP2c}1C-;AoNc&&vbNmQs{`+Z-EP;GU z*_GMhcZ~_4fq*-Ep~bND+wux@zYP3_-wjmxo|I8SCo9ds`cVdAdN-C2L9(;i62qej z4Ip7f1WwD?&^~GZL%>0Oh 
z@W`k5^Wuhu%D72ljLw2^*MS2x#2{q@&WXi6iAJccya`AZH}dGl z#-Td{QPUZ!{cc7xIaIKmP)cR9`?r;4ZyA_LcN0k>A`KTMRbi{sdJ#F*lgSU$+k6f(hp ztXJtcr!YkU;eLlu8|Tw2`~o*#cYCw|uEI`8>y3@11zIEtVjSpM2OP25UjT&BB(6FV!MyQSj*94Gn{OPlA~FZ zHOlQS9a`G&<>i#(&frecfAhMW`SV4%k zUS`}Oz=+Ao9dH!U3wa;TIC3bCU=0@R9D6o1M5PFcp8?krMz`ZMHtBHVuiI8XFXxLK z7iBVbizr3mPC!mt&x4{3feu%FSSIEM*oxQY>><|&B+5N7jHNQJ44b8I&wA~DoRSN+ zHo!XE`QN$B$d?i!+s9M_L+^YOXOp@#?T>))NhS&vkzy*L0Y)K*JI~&Vd)E^ewyb=q zY0B=dG`f%rTiE5caf@`%i=3i|$t`J1CR95+BJ=)!_m#mv9eeVrSML@4YU0gY{v2#( zcdUN0z?JJ)aSln*O*}-Oa2lqDmlYV=lJWfGA#b0tDkVVNZSd2}<39W5*?6L!n&+mo z!rau{LZouwAo{H1oO%sf`g-qD&N_#*BL11{??4Fj1~rQ9(58B*<0<7DU%-w$ zS-RL4J14PYZ<#mUqgKfan5k=y8mpr~!cl>quPCUUx6-#$pO!bPwm%AT(Uag7ue z#<+1-#fkAEs(Ix|*OPqbxm?FFE;R$6%rQJCibpXT#|)8!jZ>8ENAStT4DqizXGop* z`(Hw;2>@?)jK?N&Q%0m!frP3ApkT)f+gNeUnwot55yA!b$pK!XUZcB#SWt9-Sn0XI zZ@jj3t4l^*F|{wckI-W#ol7&!a^1^a?v(p1QuO&?H;Fid98YvXM=Rhz$DDM_YS9lG9QgOJ^SloBX&^rts>7gUPc_E*j9H`dRbtd@G@5PURUC>(BvIyPVwl zhFV61jkwXrvHX@wR$aea!=CgfpM%*>vvaVTHN&R1QD?^Zvn(Ecy2r(69my*}*DbOa zpEA{0)xY;nyNVV0`PwHpSzIeE-H`--M~Cfh$uo2xPfMN^v5g3OLR_bR4U3MXqBy<( zMj!8(M3EH`&_se*jne74IEV#H=^qWpvyJTPbxlPPHx0N!y05@2QapBB;8glS zR*A}yR|mWwd0)w?F^iuzi%i{0O&t>gi^t&fQ3GwGllBaRy;$Ck(%!KgXKl5!IO;$r zm9h3?9>GlXxo_;#D6LWG4NVerSLLkhSrpWUctUslGfb?qS(27BzS-dq3MUIL=&rl+ zcv4y16z1e(q?Q;3nJb*|tQ6$#O)0$YcHor7H-mS#gzx)h){3f{0n{}2&cxlzrJJXX z;1!C}hXRH5ia)5YYoq*N~2NE(f90eKF!82r#X@;3D>~AB=CW{uvk1E%YJ6 zwB0o1>|B9fxA0I!Pcct(aG9psD#(H1e!*EP2sxECpX2i47m>7r_vfK)-EGGQF*$8c z;N}&7eJ_(@bkpmZOhq|~hI>{YRsr*?rQ}hYgcP#mrzug(qK`t{J;K~|Q_K9P4+Qbw z3$n7iQ8(+@1EPppgmvw)#{LY5;C^A@vF>>z&houFledZA-1`kKa6Wz14azny3GBqh z7I%epo1FehajAmEPu3R?^7jgr!$g1eljMM;-zQHCvZKla1aH`+8$Ojf)9RQckJXuZ zc{w9)Bw|~pu$mOwt$N$)1ExKZ(4;o`OYu6PXe^{u#VvY-1Z#H5h}fC8uNJat_(?%tis1{!|~>h zp}2JEZqIN~Uq`yy9b=ohNu;x36?ydxFQy=K9TTTK-Ds7N)&fI3l8jFZq>F=#Q@au- zjsQtpBI`1(vo^aH6}`W>HZu>OU&~vSob{|6;_lMnCNx<5wPUdO+^5;FZpj_dG0-tk zW2UPYj`0>{ADt4abpwj^98*c8QO*pSmHQ+>YI(ByN*e= zJsydE|7qe7>8m)?8!gGLo!YS&rr+BMOoiCB2Qjq@Ff^aHu-9Cmo)DF_H=we{Ekr zJ~xQT?&MNa@$8m^71o;=(ev*}?0~t(&~FZ#DRv;U{W+7rqu7V!Ib2g3DvH6msso6u z)9*JeEkbM$<7(Rac3Snkg)Oyb!bJ4fEy-}L*@3f07B*YpV(nQgLXxa@H3lsL@$8Uy zCzu)_ypB;#{pzCFgc<1$x?V7T$_RHlc;RUmVkka5apKphc9Nq=mBqo=Y0|>Ut-XuB z>UTL|ryxo?{UeZH=;Tg)%GsWx8d7X)oTWGB+ch5IBlZOSQl8E)EZY>KNHu)lHvT*F zFj1*sl7v)#H5Qz=PeDF}v2wnlTW`NQa(jlCV|Y&ORr~hY$LTGsI8*lyXVV9Ph!(!O zY&w3Bt5nZXg$G2c!m`%xYl!V$aV?9FolVH*ln{9c*OK76GI2r+3;>Dnh}&rYb-Oh~ zcUlDJm+s#@*l4wS4rv+kY!nSSyo(%U^G>mYU?PG}ux{{QrC|DRLwwxU%n z*(n2e;^Sl6D?9l0Z{6kO{?ZVfsXsmO1-VHMjxS+HkPLAB zu$X2tgNK`8a|B|t!Z z+cJI72t&tzs%7jODOcwgmYMvmo!QVEl#QJa%5qQh79F3bu*^ogxX`5g#R*KWbb-TR6@nqx)(ePdf4)2 zMDfQ-dIs8EK;No5r|3PwwqsRq%)Wo2BegI(Q7vM5rw{kv~YxQ(bX`pgQ@%Nj%%(TfCiS#L9 zZD%WRIpJR@{0_|g!Ibny?e8bIFK}$@ecZ9&`e^#Na{)he@cUK&I79vyY`0(YuhhW* zLht;4fce%y0L%M8S!8^clbHmbR%I_(&Vy8CLBq_mp-Xp+RaFf_p8$58@aH@j*zh`_ zFE%aWQp_$6Jdq_@FNx?tJ=#Ahsoq3sMChHjbQJe~jn9+q37=nls>&e6447=a`0O3r z?tzwo)OEVrg9d)5v3S(#pCehMGIrI&y&X;+EWuHvG6sbXs-(yRE?!Ni@l(x&I8Qop zV-=3@4s`P4BejJirEJM7VV=t{9f#`qlXzaoobBv382VnWt`w?eua#s66z2{P<|y~UwL}!%1@D3 zIVYz8gI9|GI;QS!gv4@+kK7=9*p&YS@O^AoS$?Dp^lEW4TX>;xX>HfyKr5m@;m_CO zKpd_P3;K)XwjSfZJrZzd$RKO7fYS(Q|EJBD3QZ+@=wC7^E;4rgiia&!#_5z-z0@0e z!+%>iZN7h+>$8Nt;t@MEG}?c$9w?DdeDR*hr2iR$;7COsW#}9r%@mOaT|YBn!J7Co z2zZ|5UAI}h~2OBd;6M#CdjC#_!s7ld>nZCWNG&R07mL4mhXFOQxUghh3y2KKISiD{#*wu-Z6jM`tiw9|Is@CswHr5M{%qkoTdQV>3$jML)6J2H|y_4xN zH-2sRk=ATAYq2R-`JO}H0CDy_VniLEEuQTrYY&;0E~^eI4tYb zAnO1cpEa*$=bzZA(3mggfr1pkFvU~VF0$#k4q@ByL3_h3bNkDfyfK+7_&n&80qcZ3 ztW0jdiZTR+2vf*@?n!_feglppV@Pt}N1kJ(!^f1e=DnW#*sM8PXs>;_S>90Zdb*=1 
zzR%TNpGD3!4momOl4QBG=gty+YR!FvOqPw$Bbgk#aMpQ@TN4iHRH1E?4TvmFwE&Me zQqWD#t&Z6-vjG)@KnU<#J*D&2mc^A$YjisDpGS-(4A*S;(ZH3Y#qH;W zj5V?);n9r)D_+SUN^4)dcK^i9)(D2-7-lPfPq}7a&mv5+a!LjS!3exQi-Vz8_*@q0P*0yywM0)%d?_-O%aazNk?R&=YssN zA2kA+I+?6{f#WIpfFGlQanR*N&;%j-^rxO$li`?hd~@;ZGRJy?UhxO2=hxIb>@rS7 zxP+cIx?=_jyF=RbuY~r={Xi zI!?S(e3q#xx!Q)C3pR6ejDO@k$8y4YnX+UZQ$&D~8})X}E>|#_F*x1~Hw`v_h?_p? z(dFE@dVNRwg2Ka=9uBB43DyZ2oi2*2Y%esi+NzfiiMvgDCw@Mpom07x7G~d$6OORF z`+ZCmOc{V@Yp8e1B&d)Scz+h^sS=|=TB9LW=RSh^keag{>BuXTJQQ`IMc(=;o{2ke zOv1Ud0zJ_Bt&Hy(J8d?WW0>QP9rh3xNCvy(_u<0UV#AquZ&T8CJ8^ptJr*|V34qzVyaz9MN0?I-YRu2uPR_WcrNZ~k)QL@ zo>`ARt69drBuXK5J&$!DQ(`22{O&_`dJYW^B9vUi34r50;~k1!utce9fx>OU3HwP5 z(x@_Usc29b*svAD&^=$^oN=*W?!)r^aD2H77`+$0VDxZ*28TiDSb*g-#PZOhASui~ z@%F$lpP7?VedEKvu0xr*T^wBPWPM&1;x7!sP}`{Z*i<4?NRebr7jdJgs{RcaTb^;? z?;HGU0@()#x1CI@Xvd~TB4}v>zv;MQKC)S_b3Dsublc}0F;(SwXe5vHT$dO@h=i7D zA66F(D#OD?lIJs=cABAn>gti<5L19aLwviVZI8GKa;TAi{OrK1k|+S{M{tC{bxU4WG`0)Di2NUX|28Asv8WbeyhH5@A?wF+V+H3O_?xAgLyf&u> zWrrQPN$;9Aj$6B`mU+*h$K7VX5j$c{Bbc+WBOJqrtDPi!nFnd3diIRDdCxlq&RRP; z)bviWFkf6j(l3F!Tuf7E?Is~sJeF3(=#vFvo%aod4oWU1u!HuD;WKGSxq_0~UF#6P z(rvfjOXyc`IR^DS&?+8G%Y?RT2!HPb#&!spEjp|ve7~h@MwVx4vCB_>g{dDRr5U$H zXPEPk0h^gPO`NB$D$|SYI?NOoaOjV?=Ue!}P6V^W3I5}_O5Htk>O;e|Q$=y4rLW}IC?j>d zr#+e-9<}Yi3P=m0Ynz1?OJ9rE6au62ZOQLujrSTdqW`uf*Ur=Pnyvj;TQ|j>8 zGqMx^xn#7L@M(|G(@DKi;;9TjVzO>XmzN>ePBqc3r`k`7t=7r7)_Ptekh6ifr`TIB zP|tknjG;eWbS&5yIv@#b-&yf)OEauiD>2OsA#7`jJ~5GQ$W<2`<*WLHRDCfr zx77%9RL`mtl%P&>j{0VF6+CA1<{BNm%%4&5y*%?a7yIQhUW7n!eg0Z{*!46gs{G~l zh~OYbb=}?MvlB9C-yeGC{ft6#hT}|u!%|ObM_ldRN2Hx2jEkux4GGVA_GDQ;Tt8Jm zJ@=vhiCp~Gxz=3HP2V|hy?nT7tY%h&Xth88gV`4qD{6{E{slH-u4xcrcRgNwgiz?< z%aW6{p^}@)OD*Gw-!M!Qi10zrzRL?9KSfrKIBk%V7dumgwms6H(EIWLGSEge3=eoJ zzgC+4X5}tBmc_XevGtnCM`)A{0|YH9hMi>zcUpT8FZ9d+jSy1Ej^+x%1OY{}BvwJ? 
zVwQjA#|Fkxith|=@fn-IiDoKw!N)jJO$JQXH68Ba7as!Ps=N}&w|HCT6c!gM3%=~2 zt4`%NaLnR?cxYn@h;q~=d<304C56>?Yrnccm zU5YbT946V!d)q>08R|F|+Xj>vUaX{G8pOh3{D7ini{D2@Dl2%#EJh?#EE7DA49NJ-Z=1_5Z zYC&bYjI0{$vM^3{TilXHaVUor9FSxMM|pxQLhEu+FEWa12-m`na+1nc>mkW;WzM31 z4}@u|qH<1WtPZEP*6iCT@O$^J#jfqaO>bUv-Td9Fz4MXBrZO&-FHO9DQFS(l%1L1P zwI(?63QZ|+6B^aQKbtP$d!7gcsk!}X@{Fw&uMBQr)x9unSL7}eMx9L>I0wNS)Hf_k}zo|wMj zwrw86%&x2j+-Ao0`9@8o`Ne^Nd#HHP9fM6P}1k9+vh@l4I@o;JU)>($TCI44-o7f#+s;Nv}Ju|0Z$ zo=h%N0Rc*TBJ7u{CN-NUqD4O;?*pe?I}>{8r1*kbEotuY;`XR>lM~`6HyoU8p$EC- zLmh|gb8Cv%yitCDpuRH+pAwEfY6s*<3}aHpnR`$l11lT4c{Q}WoA&eNYE~21hcGkD#Mpxj69Kr zsgL}dB#=9QA9I#OpRNnZ`2d$Xg_ev)vTgcGVk*M`a)JIQkao!|9#TI_`~S=WfLk#2 zD!6^4+U+|(aCH&(S>h5ZP;S+-65@AJ!%EH{@ZA<<{*K#eHsUTM3Qd*#J{UF!4wtR}SOy&T1|>KoIy zw`i4t;wYVX@xMzi2UOjPK$9T52M0<(xv+}h$5Wk;#B%JGKHmiun7U$JTD#}&`AkZJ zKW04By{60M`=yheH?v7_z`!CaJlp#eHOwJo@}IOF=E?v#_BYM!k63zkgS}$?@p#pv<-JPWyhx zv6}wleJUl9i**!SSRIN@&j;AV5 HoWJ&8NTD(V literal 0 HcmV?d00001 diff --git a/examples/getting_started/tf/figs/fedavg-diff-alphas.png b/examples/getting_started/tf/figs/fedavg-diff-alphas.png new file mode 100755 index 0000000000000000000000000000000000000000..24cebdaee5c4b16b671e12ee6e5266d29e2a2899 GIT binary patch literal 82496 zcmeFZcT`hb_cj_35GjI6QM#f?F`$4b9YI6^rGwO<(z{eCAs_-CQHnH?E{7sWFQEre zq!W6FNbfB`LP)zCJ)ZBJ_rCZ3#`nh^_x^D+27|E4UTf~T=A3Ij&zviNXg^Y8q~o9i zfk2E8?%&k~foP~fpcDMGG{BY6HfCnv7lntenlh-UmunUHgUa^K!#f~QX%s!-3GhkK zDYyH^9v~2R*zpgA`KNQiz{Rtks*gSOTx~pkp150s)ZDGDT|DeuJuM7qfm_Jo5ANR4 ze_^pPed>dmK@xd4apR&@mI~X5XVBdQK31y-`j`I@G(FGEt)0Sf_ps}(u~k3qIo1o; zDW5BG-Md1iI{uv^rd{ANg)t?i&}F_@s)0LImj^D2q`uqP#tz9(=enZui}H&WXY^3W zy{X)-hG}ziN1th|57O7?{4JL$HA+ChpRfASE7erL|48F!`TaKI-rYyMLzRw3}L9XN6zI*Hvxq-{GKLK`$k{cCHN{K&d*nq;?FxOK9^wrz6J_(I7MAC z4$As__#LC^)- zuE|?5A1T{eRJX+uaOIpsz)thYW>&kEFd#mEVPkr|K=IB-%Amswto$KPD}2_*jeNH7o}ey2!*lyYd#e% zdRp?Az)CE|5i<|`Sre`A(qK|Uw;1nEMeM`$0m@I6}14cQJto|fIXE)8gT@*3o@h5cnQ+tSWg zCfDP!6b3ARl(zB28kgH7Ep^s%8~rUzv}#I3ozjh`=ja#|w`9>>(jJQ)wvcG{G^p-U zFD^TP9P#Y#qdGGeVCtF!^x(_w?l$cU{u9`q10rs9`7*Jk9KA3ljb1OvX&=jMhFjh6 z++e9$!5Li7R?=5ZYNCvBbeKy zL2}x_*9p~o);S6YEd>#=4{c6G-M9k-@hffg&sN3;7x3VyJ$m#;EH|;+O?r!v+B0G7|RXE+}G>(dA2RJFCkdT}Pqhi)~b> zPo3Tn#G!mrA%Z;OxUg~JCA26L5>LJkNe83wPfLDTC7$d4&O1XsMWa?a;=TiW{g}^FS$IMSF6U{d*&b`4#*oJh8n1uD&KhzOa#gS%R0C zshKx8Eo49L{5HnJB~8dS>O%AbxvHBTM}^bbuG=n41(UZ#-=orQ8IwFR$0Y@N0`vp( zj7F-}4H>zvg-ohmnY5n&gzEH-CF`Z|4swdZN&5xjwNa!dDSH#tqkIFo4t9(8Th1MV;!F8*TYQRecacQLSrT{ZWa zvF0V|iAL*Zdp_9HJn+}3n}ClUeLbanwYanJbMgHfOvz!)S0T^{GrxW9{<=RA9`j40 z19h&h<={!9j(YN;QOye0nJ0cJe9Aep(R*q|Vv#1XaxafT z-ni(n^@@8-T9Vo~LK>5QRXypW1s;MU)fjd1Hy7LcZdUrg)lV3WfHFGS1vYp)2R2Nc zL_ALm*e+-9NO4DvW-hmbVTZL`Q_t$^y+^HZ1896P~aS(-}F4X=e^R)RF0RBia-dN zdz4$EKHNBRUKeid&RO~dj`jPu6)-^!f z z$=A2<*9|EQlJ)l?t;du8M-{lTp zvVumMnWvbdzOMEo|Th(4%flwAjD&U<;1ahJShji4aO*^uw{pEe-M&CQC zXXkDEutQD+Ylzl-+t=J@5som+%g;>;7;%D@p$>a3cC#mHGVokVTP;;2PeZPgDZ;`~ z(Ze=*H1rxwNfhA~{6SIm>%ETKSW>EQ#ap{Z*ks}~KPpwm)Py&ITHJq)!S`@_>3|2{ z@oMm;sbA9t_kmerve8KAOR|iSs4G-QlKio2amp0p6v&lKz8|o5Sz5Nd&Z9~!*1uQQ zxVM1>Tm0i{TJ+Vl4!MLyM6t-ivKGQ=OC#}2c>AAHuGxcEZVud~M8j>jor%-G0?As1 zU9pBVw`boS0ne3wmGu)HpQxem%p$f%UKnB5QsiS8jY}|LAyz8^8L%YL%;wN~qmApF zG&F82iXXgc*xw=U4Fi9AQ(yUmC+XRU8NK!HGui z91$`iRqPo9J*`khl*4Ubi=Uh@xM9@(aL*!66AVR-q){QwuRs*g_odP8umq7bI!xu` zi^UE2M&n8QCQA=v_vBmcYu@k!R^!Hjsex-)>r_eS(}t!D@knmTmU?EX`s-?+c&ktv zOHU@%PAiNkfC`b*Ww|b~IGdjM#Txv;ZRz{ut4=R;mZpaqt?GAXLZ7OyZap)nL`ZM1 zEG@6yPda>O`cghp+Oj!;}OwMfWKy7kj7gU0%l8zRrc{DvOm18YdN*8 zz#7*(t83dRu~xcOpNS{NrVtyF-1U-CvKHQqjWZ4<4A;wf8^pF7+03T`*K(IT7CK@V 
z*A)Z%oTA30&%JNYhhL%;^~w!E;P6+q(gfhwPI<@cNN*Q33Z$9Eof?}Gdi6;DDXhba z650MR$^56y^3iw(?qO&A#m0oii1K^OaK%u`$O=3EjncL%N6{~y>4Ak=SMqlpbI-nU z)Hx3*NahMWQSDekB?|AE;M4`FCCCy@Y3bb2nI6q_-CLS6_yjH)Ao1!_gIygpf04t5 z@91}L$~z+-aOhUPA;UNLSc-&+A@Ednn?bz3wnUl4AkE40-BTu;iSm~P3lqoOa03R2 zL^5%c$!e|R+`_pmY%KKUvcuudoq?-|MQpL&iOOv+77mUIUZYV)UZd$2*SE7rgkA}B zJi)J)!|zypP`mu|$&^x@#>-SmSQr^@&i_N#(a!GryLZr(TRiReRN#V4ayv>TY+^S~ zQL8^2{%EBNEi*N>JQ@s}9o%^R^<#T}Vm=9;nQ&MS@|J3k7AM#k|fCM78d zk_;aNG9~*CedMirB=1m|KvEdwy0wnp7T2g-N@)MkWz$*lEL7hC2+lCqFS2d@%CbHJ9rrzmzpDn^4+mF)&}(9e5RrLMitEn}4Vg-8st zZFknod_6MDA9`-!meUi%c>~9&n3cf3>xJ+ITSa_QJ!Znu##D{A`{T`mulI(Bbr-cQ zYpmAtPFRmAq^2tc`p$psR!60lOMNCT{0L5xZOFMH;R98OyuhYuUv6kPd9N{6p#9>~ zpDVW3BMZE;jIdannhFIcW8Rw=-sBY6kEfuP^D0oJ4I9|s5}mA+b{Sc` zAT+IV^gtT-S`+ok8VeC-C!%*F)A6j1)>%06u%6$^!4)5?;Ugw-5qTe{q(`}EVGoGJ zQWwA2NBt(vi;}A)F-y=gOlw>DBh)W;AYV(3KJG{}5d6|XFj|sH;V>fwM#6E6n(TP3 zdJkbk*&9fI_6x-nAxK9l8bubTbWq!(03qKc}F|jwA z^BoB>Cv9?!?9D{Q##i@z*1$R8M@s_6w%*lYoxQS)u)2bEp(J>_oI?-OWfLJF^KENo>4Rseq54r{@1HiVsVauv~+8q_f%cYrn&b`Un-DeM* zJLuEdIx9Pt?f4D4C1CDDYq)2agm>sckg0tOaAjSuchNniN0*F6BGT%+0vLa~0Z54e zIP2q!ScRjPLVsdzzbGRt%#}LRWj!Mu+)E0mUJd$~j)Qmb7dy0H*Hlp4l)(+`bZ@<# zMQ5+)bS#J8EOzlV*h=T_r}HjGc43Q;boYF!IQ-BC?z4A^nVaa5mx@(V(bIjGmloD) zqq>$S2%l!%r?9Tk#k_`1n?+?fxcsH0@XbB(&Dts~BneuWQq$+H!e<{mF}>*1 z4b$1LhwV*43!GElAJo*Xv-w&jv1J@c*XodN#FWK|dSQ4@9Sy znc<8v#%Eld&qKEECaQO{2VNfG6iq!EXp&Ra4VBP;l!kM}VNZo5BdQ*9BsK^qg}2W1 zZk9RBz}gWOEK`q$15hsHajWrS9J@2>F!Gx5h@fe99mA9(?|1`b@5J~}0B_@X0Pn{9 z?S0Rh-QEc6FOO7*(U13I%rWD7VVIFooxM$Qrp&znuN{uOF(@O~^-Ul5_0RYZuS%cK zU>C~SRjFKlrF>7lr@f4%cfXwr0pEBB^YTj%VB}RWqhaJ=ajG;y1VWqNy+KMZr&xjq zJ^236V-QLeUH-|^#V0A`$y5%Z2l2H4MxZjiAiG^(&gDJ!q}gx_U#v%EEYsGQYKt0h zhVaTl-|m;1H275lw|o}5?SIRrBGSmZxScT;AvD@!D#{7>iQ%bx8{MC!*7dsCe37{9 zl0f$R)D+f0Pu3HFdDTwg1{AF{5@jy*2beD%t<|YRHHs2KAtV*=n zjI8Tw`v_ZlR_Dl9@^GYql+k@y%SoGu6DLkxK>t`@w@UVmol!}4#kP4TlFX&r*Y}EI zT}j^0ml$@Yu`yckbanx6Hqx>cZf#$7F&pL-BkGm3Y`MR3Mt9UYf-s*gYkg$&H8`k~ zI0fB{yhgq}A*V*P!bVxAW%cuW$B1&yyw7z}D89)o7{hwYT!4L=kV1IY<-IzPg}|lq zhhgG2dy4;<%pr!$+2r(3J_zIe(Q99!90=6~21{_cEfdgIv*gsPJ zW1L>-rQKiNU8#J_)vi)^=A4Q(f*-@$;2+u`U;-<`owQz+DE8(usi+s-FOt8=JzY8b zSxaMSk{-sbTF?jOhQ-V6#6Ghgt7Ga6Z8I0%3;7G}$jHd*Dh&@0cO*9MXIpAmkq9{I zsI%H{S$0$|Vo}^eJ3y|T#Mk7*LB-|KeWTe>gd2R_apa4oh{DtBx=L07d%Wi*Qjgvk zEhha$StY8A55#S(a`KLPIs0v~%R}x^Ef#HNgRU#)?MSdz@;3C-Yvrw_ck4ZU1px^H zsZVn$4eO6I-C%TrTX6p^^n#q4FEyouqu#>&xf-V_E<$vrvISIN4^QX+u!1yo4o5|q zzv$KfoG&9GH)LJx0bd4?kZ2KGZV!6QO|$b}{8b=nI`{6~I{{j4v4(wJonVl*NXsDD zcpPJ7dY}^gz^JRjKCu%?x~`W~vH3<=&xMe~m7N>n>T9qxjnlV5*t*E!j{#ReLhK@n zmo6}GM1FtU-F@L1OKA%9*^Y`}c%~ZU3WR$UVug1^FHgSyRf5_1gN8y&{vb|{xIfs; z0}6fZs1>}X#VfgQRWrw)*nY~NrpMuyOI*)n-F>72R5JuJ-gu8~#xo{hb6NE*3rL$y z&-JK0r3#|8I+z>oX&;EPaa3mb8J>j2b8{uisi67;C!Pmi|sfFt;{w`mFceV*UN6i|BzBX!W_XGeTLr2v82PXvPklztQQ zp#M))wEpJ!LI3a7Jcha?plk|$3DYyIOa!>NSuq-+GAh%7F3vsA|N+di-{J{`))peIuZja?2RNXyboa)$R{y z^jN1WKo`ooL?zbmR{`}!1I&!St1*z>5D3ePOqfd@y^7?33-W#yDPAMS%{-UKqc5r~ zbcz(m)p#s-a{El3*-(8C1q>|Vk8@W)lCwmNiWq>KYM8*8ufxBOWPx1(|o4Ez!%>GP|iVgrQFq^AQEO||h+Obe-G>Tz_ zuH61Ee!X*a>Gh-zojfPoNynN4lLR9E2$_8DOFU1Tm>^qbkQyyDK}GNfuYx{4RJ%zr zAP>Wo83g%t5vV7Vzu^wAn7tLPV4g02@}Oj-)Iyr>tB&gP_;t)(Rz%jDiE+Atak{>m zt=O59*u%vHTQ(X<$LrdL*-7J~mbz^kWUZk^!}6nYi?v2dE=0!TSCl*#S$2RaV89g0 zzvd0sZW4P%Ndm6Z15;OwK|6(V+h3pIU-;-)bPAJjlG^_8tv2ZSUBMq`0D+k44*$Ag zR;BXyD9RKFX1Z^Q{!;$cplvpkP<_^Wbn3^pc*l=v?wvjkS$Wv#Fv^;H$(}U-e(In1 z9fmUv|26yZcyp$L+U;hmH8~;uEw)up{WDej&8T(W;GGx!req7o)+7BIWrf|Bh3_Tq zk!sMXMR{hSu2yZk^lw!E?*t6)ENR^|^GKQKYL-nJs{iF+HXf;zVM>d^8h##NsqJg1hg63kXSwDZ#_O$3Vyw$qGxXL`(} 
zL{T;)KC#wGn_-2`i&aa-l|@;e>b``hX@j%VlXCDQuG;mat``dF7>q~*l)nw@;$doV z)&llFET@pI;Eyz+Wmy<8$}|>-o1cOWQ>XxkYBS&CB>Hwa^PSZ}6$oDm)izA|Y=WDH6K7SavHvxJLZIFnE?* zd@Aqj$@aMCBZXs0+q2#dS!62!WBt+6jtyB)JubR)BR)L!J99B;M#0~7YI&$pLB=$L z2}Rbef7G(EEx71a%hqi%>WlF{cE@b;8yxZ7hAvOz4ZFP7JWco zkwD7v7Z?^g=bCk9c!f+C=?z~ku3n@>~` zj8>FYRcmY|JDjhziD%%Edu%arc4NnENHE+ zV?6sbi;g{0_{{Yob-^DB+c=g{Sz*3qVYcvecw}06!X$I8d3M0V31+^c)-l2HnSv)< z^K7jRX?gf91<0bG)6^t0NmhSjk1O1=%}0{s-hXwL^y*N`*_JwMWQuphPEBw1cHUzy z13ts5`x_*ZE-U#!Y8;ikZM+o9mVZF}yglfaHJc~h_7!{MOCNr!Kj}a;9qBGsc{`jk zCz^wWw#P4&o;b|HDxMelCPM~29|0kJl5@vG&R(D!n{%kXK2@e@Q%r&N6&`h7Z}57( zadU^SX1fu8mX&g-cE@qt1q+n6PIDg^8&^3c;S|+rwWVKCzUKUI8vXIeYi}DuxQfH& zGookGJ=k;6;HL#{!SHBh1XD{ZS)HxEJd`rqGF^@CFrF3IqF4xy6^D=qI2y)TiFq&< z?-V@9D!Nd236Z{B6RL1)Yu+*a#B|%wTydA_-`mphW(@2}vnpQVuOfCD*-!-?U_ya9 zJGmT)szGacR)X4Vb1?lF*-_&m97|n!M=tH7*}B?|^^5t{Jgrbt3({OD9qt4dNgqv} zJ8TsLa)a=HJ3e3{Er8eWWXdQ^l@kp<=T!f}FAIM7LWV=u@1h)KGwV%oWc>Q>0O~U& z%5hf_&zCJOoWe*Teq@rPKzrP$+$l*c zRA66S&gy+l$Gl(3$tu1}hzC|TaP~0U&rb4fHQ=9rMLHnrB96mcK%VG3-IJi%@-2Gl z$;J=bpk3BPZ9da}MWg5PYfgBlsq*BAQ_O+{_@_gNaW(@rGZV0!q4SAr7yq>z0of4b zcq>xSXSdya^;W--G?0Trn4S7y{^{em zymQ*ih8HkrpQq@9&2V`bKLBEykgA||Br{)rwTtMUpj%xidZ(Q#9_c)UO5el_n;(kB zk$4RNodg5h@-NkF+=JcLXY(>Y<85%*SyNYB1WH5J{gHQ&IFrw?AS-yk|S6ceo$s2a3t zPEddeVo9S`3Do7INsCc~M#o)NNbQcpp zHQ{TLRN$;8C;4uw{ouF`$VAd;7PoicS6-^e6uS5keKlax{h_q}Taw4}he?A!6hPVf zBK*{v6pFV!%&h5-T2om8$Lj%`dREzX7pSv*%DP8ajWv%TM?Y7bUcp&}X^yt5AnR`ehHDiEdUGnE#{=b)Lk z1Bs3%=}uW7X5I&k)SWs6pNKip1{V<*3Kd&s%F8Eyfv2xPSPzf^43{>#;00vkz%-tY z?!OI*Q++s{Wjmi((4J181$r+enyHlB;-5F>8lTU{D@tM?^psk|e3zHwqq;Bf7gm{P zR~)0nnubVq3_T}b>q-E}%344#A39hgox~Cvm^#z?)0x1%>@vezEJR}dhkmL-KY1m= zps{}k9%j0@G^uTa)?O5Z?&JZpuRSoma0gf_apFp%+0)Yjz#h3}skK_88fd+(RYSj= zEl5gKz76svWA|kE~Hv9lEECbc5SgIZ~L9Z>m$#RQ=eR zv1lOXN8>ZpSt%FzQ5ix1Msk2ZUSwI}lBH8BOnvdG=9*zuPA#UJ*r%!d=2xqlYg-GmQX{b6I)$|*;W1Y5Zbf;W6Df1@%t#Z~r3;08(mlqEe z%IziJ{-1Qjk|S|ZDLZEi*oXtsU=05)FvfINg4C0}+)94Bg4aXkj+*;$X9isbZ1QZb zHeWt%QUU05XzR|X^K3ask4nL&h^g+<#`5x`ZE*NZ!i9INq6{KFym!=BLm_RUn#t#^ zpKss$TwI*U12bjc5Msp3^o^gow}=XX#TmT@u%Oan0!BfHi%3V;B_&RbDS! z+H{6ejcq~!(9nlBTOJr+mJdjl%_@gy>la(YUlgq$Rfk7Grm#9!W>j!x$nXYY=gi59 z*8c)?FKp2UJ@u#fW=#TG`nBcV16)0fi-2ft=@PNHsQX76CCiZrkPaOur$W|(%%VpH zk-ZqPd)4R7Rt_dx9N*I?cDYimttb88wVVEk`bIN1OLR(p<_A5vFiG%n7{|yUqH$o%2?=7lI zh@rq;&ZxZ?%I&ky(VL^+CMI=V8`CiN=?4Tz82`zm0A6}{p6(U$$f%W#TzESi^rR7% zS9P(e{Zvv{fPKH>+6L)z%0;bTD2JKO)1{Nm)Jf0el?S{)C#XEL>D;X;etLFMCn1i+ z@*l1>)OrR^{gQ9MK@-b)(fB9nvA17Q;TlKwt0vmS(&rQ1f*;qY&bcdWKUi+G1!KLw zs7t#Bf>Vd|MgARa0Np=8hp*{s;?A{hQfK5#vufW6IMQ~;Q{+8W0f9+ZGk4OD$rr$m z^$IeB?i4`H1Or?W9yYtThO!p7w<^eD-u=i4dOb(4O8)OG8 zg<$q&Sl*T;;q;{Z301Q4c!pX!{As$F zf4atv{ETQFikjZL%Rr9%igH**Fz7$>t8X}!O4+lPo8Qj$SZ7Yz!`}NX!7}FvnI;jW zC2>JQ0bsX>T$HcQ{MzL!nd$7FOQkD6uac-V$2eL0n?E>px?(JK;U(IEHSzvtv8bHHIP{!FC6gnz)l(0cfmI0VmP<=hmG3x zqMzq<=2|1HzrVfEY~(QrDKCZKg*r=qNsrjyJ0oM5pTWn;NV$L0Xu~mWo*ke{016KT(BgtI_JcV| zlv_(Z$=0oJSj8=yK&vPOqW*9$!X4=5ra`ejBsW`l^xRyEDsK$FzI`LaQT#ajVuGnl3~*%X@fM)`gc$1MGY?ir`>W{h zoUZK;uBKXHy#6AOjx(7~T;@dT>Ab$BCgSo;V$EeL!N)=h`cYiz_GnYPa@2M3k*%mi zXdw6~S%Z#D4a7Vk30WS0`s}e9b=zfjqDBu54Ni_HRsN^oog$NYl+e`GMt3T$<8)O! 
z<1f4+7%Wm$t^DZrELD=cf7Nk!QS;15iDmOkYH_c%Axl^PD0$3>8o9N+tnO?tpa}f& z%n2N+8Bo+QOM9$WWkr3*Ovw0CM7Ao-;QlO1l~!A(H6ZVF6iqw3D!CK*q=vK#1-Qz7 zcEmy;SYAV3&Ng6b?2_N%wE$?!0XdJ37hy#&reB3$Q8-@ES>;4t;NZ`o8jw3Y0f?(()$OYsD8k6< z#?@|_ILl2oysYU8GL$(Z08P|3U$Fzp4m0Ty^m%}Yrp`@gcjF=bcgz=4|Ej{WP)E}r zh2Gkht_5^j23hdRk57U~fj*=__zEh;V^lWY@AD#G6F}Hb)tmD&ZfMC_v1LG6=HAGBLcqVv)Zl zOW{gEZ|zw3fh#-X33XOlJKBt{AI77dX^E-MEg;I^&H*4rKUXw1evO01lZDo<9AZwo z4XwQPpcL!O5fX%Zcm9ADxQG5HC(|@g9!LGc=k)HLu7wWTBjBQ}qmsrFE5MSl!5@}dH;6hDA2hUP4$*VFxbp?e_29TNzdoueg!#*kwTLBakzDyc}>K>9~vn6bHJO`JoC2zTj^cBfKRO(_ct?7z=No?$9J2 zOi^(5EY_*eF_FUxHw5a6)}D++)Ij|>XJ)Q=$d>uq?j%?i7Cv=x6?*vLdb1fo^qwT9 z6r}qce1lteB~bgvSOMN+otx?{u4jHrQnD|(?HuM%RZD-SVNHX#HRnXYIx&@medsZc-RicMTGtx?YUXk{ z$1^bR>X@A9HLy3V$(OetI#C8w9IuDKJd(z@Tt&hDb^AUn9z z>Xm(-0l)8#+s)3%$m!>H>)zWx7-n?s364t)Gs6)_A2hzDN7fFN8~nr+Oz)k#KisgH zF@q|Ur-s&Jj{Y!q|g|ncIq1 zIUSSi+Bm+Sbr%f=j=ww>5mOF6TMfkOg&!eIqQWeFpSF*~y6lE0-s|9)X;&8u z;&BQjD9=dgiNnX=s+6RSe40$0xQ7r_gx7432@y>wDZoRCAr`Kq?@lc0c|j84>6^+S zmKjPWfJb{iNb*0>+ z{u(m-wa0vVxsMnzZ#0vodQw)Q095LDdqLu5gR{Mx8_@E5FH)d{pLxnY)cu$YI4|^r zx3}Vt@P`L79F~8cqHynLZZfkuMR9vDXJ3-)dZKD1t(_-jNS<K*-Ql9&db9Fe3l%{~S_t@ne^ z_PuOiJ7|mK7Ll;+mE@<9WbV5}1~QjL;4Cy#ql82_aO75gMEs2slkQE;zB4C|ZW4{n z_x*s6t@m678ge|lBWWLdjkHRipzyg1%A)KL51)fxQY!^cNstcS2AJD8$@H?+X7%=pc?xqA1AR(I3nEB7J~cay4Vr^L zoPvYo_h#hh8$WKmrsRHTsm694IIj-#Rsj zMNCd!TZ)4UV6ni1Ck$SPfu7~=_lVH;0lV0arveKmc%Zx$CVM3+HK9!u*PcS1j@p;b zei{M-HyX(TO=tRvZ1K)dYO-1bU0O;mciT@WzI-fxO#BzPyau7<_jG;%0wSB z?v;riw?qdqeNf2z=1CRgO&kfb1e&s%X9%nMcZo*_T?M6X=Lb*X!dF8GUw0-uZ#+@D zntO7McJSmPQi@J0@nAZnVcee9Qgqo?R3vm3PI_{bVYjufm}(za07y8krKUIc%Kua~ zLeDisWOUOQ*2!a25ky15O?>o?7Z^7b7&kT!um|B}4P>t{6MF`{A$@$N)~2SW0w^sD zoNg22$oJ&EZdS`U)J1qhHpw$;o!M6JU^~CM`Dg$2xv+l}*_3czEHwL0kFPZ(fJHm( zK+3^Wbel{WcY=Cxke)LjfL+om45yr(L*(BDX{)<@xNg&ZevhZ&NDJQHC5GSvngsp= zD9;rFcW=_Jd@;Mz(iD86$D_t=R@wJ(%h(2Xh2_X>71}ph2$yyVB?CBs&*g&xEhzd0 zPV6%Y9u6acV`N9#$4wVLY(O39yn zh2^g{l0@t3Z`zeRjU!+?tRSVr{<$(yT=%W?ZO`kif|zCDJIF>KcKq$tAvxaDipGJv zc3E<8Ke4^@6IVtn)VC@gjQ74`8r&XCA71ZVuF=KRn8?Fwzi1&7au)X*PEo~$QDmQ{ z3hD*?Pwm+M?g5}6hBDirLx)1=yil}G`|F_ZVcqwG)V1j2#Cj`sqr~{BH^$Lh#^lzt zU0%b&GoaSK-<=Wn=w7`1Nh&I#?Qa<6dh3{clegQs=WB1kt49U5btzD1bKG=Z%gg&B zPNB5gD~g2ES_tCL``pbPd%h?;{V2P7_BD4*G;;6HsnIwspqBHiYv(r)t|F+-!5vDG zy2%<{Y;u~K?i6MIFmJ~B<8?Dr2{zkXH_g6A5P=^SzaNWE{Ka%!WVz2{>Lz9t=+nja zDw#u^$f99BcGEV0`2y94uEkKSmu8up zc_YdckPr|%P_fbL)#eMpG!{gEugPW+CA1N{$;yCb{A!2T%?uK;$&g*r>RaV z5{R0-_q^tI-B6VMs!+yuztf$U!b=C4NCe*FEsdb!e^w4d2l<-Jz6&yxI9z1uNs1)0TWKL^oehho1n zDVj-bT-Rn3voO<-8FP>L+6iW!%=nZwd7rt@x0MHB#Tn0h`fOb4z1Pd1!5obIE_RX( zpx0LzPZ*|om7Y3LMon4ed;MLAhDJzGsoqhQBz-R{?JG{|tJ2J%mX>$^&Vv@Fd15|% z8B;b0m#-*?n?!2Rw?sCV$|jfOI)`V*;ELsQw08X#5nHu(Sw`j+dHCJThwi<$>zeaM z)9@mkE;~_yc)=W<8T@kQbi>2iv85{Ro0*mHt{EP>p5e@kk#%Fs$`JP1(B)J2X@Djv zr7w2pcd#O^;&+;sj&z!53~ls5Pm~_4-Vf@fqL{dSe&^0@sy{Dai z7$w4Vg-J{&6!wM;(w?kXaiu43ojpx~md--afJ}ajzVvTwHG>N}MKnoW6BH0^|4u`J z@uql?%TJ{)uMS#aIG*-e^%EtQTeN%(D{`DvcC^>M)e$Lol%9Pxdf-rc-agv>xh7Xpc07_~>a8p~VQC8nkl9B=fx+PFtprY7O2EeoFMs5ZZ$c-L9 zJJ)C~Ba9BGCz>q4H56J}y{4-!2C5OsWd$tJ?OUH4TU8#Qm`<-lsME*4aidDa_A+D zo&n(obU<&I`A9z*7-lA&)<9O#L!U7DiSJ7abWsX zw#j+3>!UgpI2wvqppIjZsvL|iQ3dASFVF?-;wxKI<)L|J`P95V0cp5kRMiK07jzl& zvq2yaXc_&+_;>49x(QC(*^heR3=o;9uylg0;SREAGxYVyk6DNjU&Gg`oHETLR!%x2 zQ-WQWM~#L>^VT=Z;UbBn3d1UCP>iuMhzTHca;Gwbz>#e=D<2;gwFIW*R6M80hn!?e z;E%km%vMiGa5}UL8i(Y$X z>MO-PRn%vfv2yj_$LcY9WfeL*u|K^hzB;D9RNL* zt{YD2{pf}BNWSW{Tg>gXFPrMD-p(S5&`sf|#__8PKG`q6 z>>o{y21QX5b^opv{j=xl5R+LOozpDOnQ*^2thJd(aRHxM`+?cAqrRJq%t8_=7y+ae zr`BS*by?6_DH@=b5D-o()C~VQ+C1R9F8*pM$)=^A;>(CbpJLW8DnFbzSuqM_!SAu2 
[GIT binary patch data omitted]
literal 0
HcmV?d00001

diff --git a/examples/getting_started/tf/figs/fedavg-vs-centralized.png b/examples/getting_started/tf/figs/fedavg-vs-centralized.png
new file mode 100755
index 0000000000000000000000000000000000000000..295645c6750a255de9e673df559f15eacfe8f8bf
GIT binary patch
literal 31496
[GIT binary patch data omitted]
[...]
+if args.alpha > 0.0:
+
+    # Do alpha splitting if alpha value > 0.0
+    print(f"preparing CIFAR10 and doing alpha split with alpha = {args.alpha}")
+    train_idx_paths = cifar10_split(num_sites=args.n_clients, alpha=args.alpha, split_dir=train_split_root)
+
+    print(train_idx_paths)
+else:
+    train_idx_paths = [None for __ in range(args.n_clients)]
+
+# Define job
+job = FedJob(name=f"cifar10_tf_{args.algo}_alpha{args.alpha}")
+
+# Define the controller workflow and send to server
+controller = None
+task_script_args = f"--batch_size {args.batch_size} --epochs {args.epochs}"
+
+if args.algo == FEDAVG_ALGO or args.algo == CENTRALIZED_ALGO:
+    from nvflare import FedAvg
+
+    controller = FedAvg(
+        num_clients=args.n_clients,
+        num_rounds=args.num_rounds,
+    )
+
+elif args.algo == FEDOPT_ALGO:
+    from nvflare.app_opt.tf.fedopt_ctl import FedOpt
+
+    controller = FedOpt(
+        num_clients=args.n_clients,
+        num_rounds=args.num_rounds,
+    )
+
+job.to(controller, "server")
+
+# Define the initial global model and send to server
send to server + job.to(ModerateTFNet(input_shape=(None, 32, 32, 3)), "server") + + # Add clients + for i, train_idx_path in enumerate(train_idx_paths): + curr_task_script_args = task_script_args + f" --train_idx_path {train_idx_path}" + executor = ScriptExecutor(task_script_path=train_script, task_script_args=curr_task_script_args) + job.to(executor, f"site-{i+1}", gpu=args.gpu) + + # Can export current job to folder. + # job.export_job(f"{args.workspace}/nvflare/jobs/job_config") + + # Here we launch the job using simulator. + job.simulator_run(f"{args.workspace}/nvflare/jobs/{job.name}") diff --git a/nvflare/app_opt/tf/fedopt_ctl.py b/nvflare/app_opt/tf/fedopt_ctl.py new file mode 100644 index 0000000000..0ec0d2420b --- /dev/null +++ b/nvflare/app_opt/tf/fedopt_ctl.py @@ -0,0 +1,160 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import time +from typing import Dict + +import tensorflow as tf + +from nvflare.app_common.abstract.fl_model import FLModel +from nvflare.app_common.workflows.fedavg import FedAvg +from nvflare.security.logging import secure_format_exception + + +class FedOpt(FedAvg): + def __init__( + self, + *args, + optimizer_args: dict = { + "path": "tensorflow.keras.optimizers.SGD", + "args": {"learning_rate": 1.0, "momentum": 0.6}, + }, + lr_scheduler_args: dict = { + "path": "tensorflow.keras.optimizers.schedules.CosineDecay", + "args": {"initial_learning_rate": 1.0, "decay_steps": None, "alpha": 0.9}, + }, + **kwargs, + ): + """Implement the FedOpt algorithm. Based on FedAvg ModelController. + + The algorithm is proposed in Reddi, Sashank, et al. "Adaptive federated optimization." arXiv preprint arXiv:2003.00295 (2020). + After each round, update the global model's trainable variables using the specified optimizer and learning rate scheduler, + in this case, SGD with momentum & CosineDecay. + + Args: + optimizer_args: dictionary of optimizer arguments, with keys of 'optimizer_path' and 'args. + lr_scheduler_args: dictionary of server-side learning rate scheduler arguments, with keys of 'lr_scheduler_path' and 'args. + + Raises: + TypeError: when any of input arguments does not have correct type + """ + super().__init__(*args, **kwargs) + + self.optimizer_args = optimizer_args + self.lr_scheduler_args = lr_scheduler_args + + # Set "decay_steps" arg to num_rounds + if lr_scheduler_args["args"]["decay_steps"] is None: + lr_scheduler_args["args"]["decay_steps"] = self.num_rounds + + self.keras_model = None + self.optimizer = None + self.lr_scheduler = None + + def run(self): + """ + Override run method to add set-up for FedOpt specific optimizer + and LR scheduler. 
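For orientation (an illustrative sketch, not code from this patch set): the server-side step this controller performs each round can be written in a few lines of plain NumPy. The helper name and the dict-of-arrays weight representation are invented here; the real implementation is the Keras-based update_model() further down, which applies the same idea through tf.keras optimizers.

import numpy as np

def fedopt_server_step(global_w, aggregated_w, velocity, lr=1.0, momentum=0.6):
    # The (global - aggregated) model diff is treated as a gradient and applied
    # with SGD plus momentum, mirroring the optimizer_args defaults above.
    new_w, new_v = {}, {}
    for name in global_w:
        grad = global_w[name] - aggregated_w[name]
        new_v[name] = momentum * velocity.get(name, np.zeros_like(grad)) - lr * grad
        new_w[name] = global_w[name] + new_v[name]
    return new_w, new_v

With learning_rate 1.0 and momentum 0.0 the step reduces to plain FedAvg (the new global model equals the aggregated result), which is the starting point the defaults above decay from.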
+ """ + # set up optimizer + try: + if "args" not in self.optimizer_args: + self.optimizer_args["args"] = {} + self.optimizer = self.build_component(self.optimizer_args) + except Exception as e: + error_msg = f"Exception while constructing optimizer: {secure_format_exception(e)}" + self.exception(error_msg) + self.panic(error_msg) + return + + # set up lr scheduler + try: + if "args" not in self.lr_scheduler_args: + self.lr_scheduler_args["args"] = {} + self.lr_scheduler = self.build_component(self.lr_scheduler_args) + self.optimizer.learning_rate = self.lr_scheduler + except Exception as e: + error_msg = f"Exception while constructing lr_scheduler: {secure_format_exception(e)}" + self.exception(error_msg) + self.panic(error_msg) + return + + super().run() + + def _to_tf_params_list(self, params: Dict, negate: bool = False): + """ + Convert FLModel params to a list of tf.Variables. + Optionally negate the values of weights, needed + to apply gradients. + """ + tf_params_list = [] + for k, v in params.items(): + if negate: + v = -1 * v + tf_params_list.append(tf.Variable(v)) + return tf_params_list + + def update_model(self, global_model: FLModel, aggr_result: FLModel): + """ + Override the default version of update_model + to perform update with Keras Optimizer on the + global model stored in memory in persistor, instead of + creating new temporary model on-the-fly. + + Creating a new model would not work for Keras + Optimizers, since an optimizer is bind to + specific set of Variables. + + """ + # Get the Keras model stored in memory in persistor. + global_model_tf = self.persistor.model + global_params = global_model_tf.trainable_weights + + # Compute model diff: need to use model diffs as + # gradients to be applied by the optimizer. + model_diff_params = {k: aggr_result.params[k] - global_model.params[k] for k in global_model.params} + model_diff = self._to_tf_params_list(model_diff_params, negate=True) + + # Apply model diffs as gradients, using the optimizer. + start = time.time() + self.optimizer.apply_gradients(zip(model_diff, global_params)) + secs = time.time() - start + + # Convert updated global model weights to + # numpy format for FLModel. + start = time.time() + weights = global_model_tf.get_weights() + w_idx = 0 + new_weights = {} + for key in global_model.params: + w = weights[w_idx] + while global_model.params[key].shape != w.shape: + w_idx += 1 + w = weights[w_idx] + new_weights[key] = w + secs_detach = time.time() - start + + self.info( + f"FedOpt ({type(self.optimizer)}) server model update " + f"round {self.current_round}, " + f"{type(self.lr_scheduler)} " + f"lr: {self.optimizer.learning_rate}, " + f"update: {secs} secs., detach: {secs_detach} secs.", + ) + + global_model.params = new_weights + global_model.meta = aggr_result.meta + + return global_model diff --git a/nvflare/job_config/fed_job.py b/nvflare/job_config/fed_job.py index bd72a8fce6..883c83ad29 100644 --- a/nvflare/job_config/fed_job.py +++ b/nvflare/job_config/fed_job.py @@ -168,11 +168,11 @@ def __init__(self, name="fed_job", min_clients=1, mandatory_clients=None, key_me if metrics are a `dict`, `key_metric` can select the metric used for global model selection. Defaults to "accuracy". 
""" - self.job_name = name + self.name = name self.key_metric = key_metric self.clients = [] self.job: FedJobConfig = FedJobConfig( - job_name=self.job_name, min_clients=min_clients, mandatory_clients=mandatory_clients + job_name=self.name, min_clients=min_clients, mandatory_clients=mandatory_clients ) self._deploy_map = {} self._deployed = False From 5c6322985e58da988a5c6f5152a29285c6726966 Mon Sep 17 00:00:00 2001 From: Hao-Wei Pang <45482070+hwpang@users.noreply.github.com> Date: Fri, 2 Aug 2024 14:10:15 -0400 Subject: [PATCH 13/16] Update setup_poc.ipynb (#2752) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add job templates arg to avoid "Unable to handle command: config due to: job_templates_dir='None', it is not a directory" error Use full name Co-authored-by: Yuan-Ting Hsieh (謝沅廷) --- examples/tutorials/setup_poc.ipynb | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/examples/tutorials/setup_poc.ipynb b/examples/tutorials/setup_poc.ipynb index d1a1aa0007..d07c579882 100644 --- a/examples/tutorials/setup_poc.ipynb +++ b/examples/tutorials/setup_poc.ipynb @@ -81,12 +81,12 @@ "If you prefer not to use environment variable, you can do the followings: \n", "\n", "```\n", - "! nvflare config -pw /tmp/nvflare/poc\n", + "! nvflare config -pw /tmp/nvflare/poc --job_templates_dir ../../job_templates\n", "\n", "```\n", "or \n", "```\n", - "! nvflare config -poc_workspace_dir /tmp/nvflare/poc\n", + "! nvflare config --poc_workspace_dir /tmp/nvflare/poc --job_templates_dir ../../job_templates\n", "```" ] }, @@ -99,7 +99,7 @@ }, "outputs": [], "source": [ - "! nvflare config -pw /tmp/nvflare/poc" + "! nvflare config -pw /tmp/nvflare/poc --job_templates_dir ../../job_templates" ] }, { From 4b32f274e6993ae05a02d186c067fcdb0ba8f544 Mon Sep 17 00:00:00 2001 From: Yuhong Wen Date: Fri, 2 Aug 2024 15:09:24 -0400 Subject: [PATCH 14/16] Added id to the jobAPI swarm_script_executor_cifar10 component deploy (#2678) * Added id to the swarm_script_executor_cifar10 component deploy. * codestyle fix. * Changed to use job.as_id(). * codestyle fix. * changed to use job.as_id(shareable_generator) for shareable_generator_id. * removed the un-necessary job.to() calls. 
--------- Co-authored-by: Chester Chen <512707+chesterxgchen@users.noreply.github.com> Co-authored-by: Sean Yang --- .../pt/swarm_script_executor_cifar10.py | 21 +++++++++++-------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/examples/getting_started/pt/swarm_script_executor_cifar10.py b/examples/getting_started/pt/swarm_script_executor_cifar10.py index 4a04f10f40..adf59091c5 100644 --- a/examples/getting_started/pt/swarm_script_executor_cifar10.py +++ b/examples/getting_started/pt/swarm_script_executor_cifar10.py @@ -47,19 +47,22 @@ executor = ScriptExecutor(task_script_path=train_script) job.to(executor, f"site-{i}", gpu=0, tasks=["train", "validate", "submit_model"]) - client_controller = SwarmClientController() - job.to(client_controller, f"site-{i}", tasks=["swarm_*"]) - - client_controller = CrossSiteEvalClientController() - job.to(client_controller, f"site-{i}", tasks=["cse_*"]) - # In swarm learning, each client acts also as an aggregator aggregator = InTimeAccumulateWeightedAggregator(expected_data_kind=DataKind.WEIGHTS) - job.to(aggregator, f"site-{i}") # In swarm learning, each client uses a model persistor and shareable_generator - job.to(PTFileModelPersistor(model=Net()), f"site-{i}") - job.to(SimpleModelShareableGenerator(), f"site-{i}") + persistor = PTFileModelPersistor(model=Net()) + shareable_generator = SimpleModelShareableGenerator() + + client_controller = SwarmClientController( + aggregator_id=job.as_id(aggregator), + persistor_id=job.as_id(persistor), + shareable_generator_id=job.as_id(shareable_generator), + ) + job.to(client_controller, f"site-{i}", tasks=["swarm_*"]) + + client_controller = CrossSiteEvalClientController() + job.to(client_controller, f"site-{i}", tasks=["cse_*"]) # job.export_job("/tmp/nvflare/jobs/job_config") job.simulator_run("/tmp/nvflare/jobs/workdir") From 45a16d569b650378f26b3420e69c403f99272d93 Mon Sep 17 00:00:00 2001 From: Zhihong Zhang <100308595+nvidianz@users.noreply.github.com> Date: Fri, 2 Aug 2024 17:03:01 -0400 Subject: [PATCH 15/16] XGBoost plugin with new API (#2725) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Updated FOBS readme to add DatumManager, added agrpcs as secure scheme * Implemented LocalPlugin * Refactoring plugin * Fixed formats * Fixed horizontal secure isses with mismatching algather-v sizes * Added padding to the buffer so it's big enough for histograms * Format fix * Changed log level for tenseal exceptions * Fixed a typo * Added debug statements * Fixed LocalPlugin horizontal bug * Added #include * Added docstring to BasePlugin --------- Co-authored-by: Yuan-Ting Hsieh (謝沅廷) --- .../xgboost/encryption_plugins/.editorconfig | 11 + .../xgboost/encryption_plugins/CMakeLists.txt | 41 ++ .../xgboost/encryption_plugins/README.md | 9 + .../src/README.md | 0 .../src/dam/README.md | 0 .../xgboost/encryption_plugins/src/dam/dam.cc | 274 +++++++++++++ .../src/include/base_plugin.h | 155 +++++++ .../encryption_plugins/src/include/dam.h | 143 +++++++ .../src/include/data_set_ids.h | 23 ++ .../src/include/delegated_plugin.h | 66 +++ .../src/include/local_plugin.h | 107 +++++ .../src/include/nvflare_plugin.h} | 48 ++- .../src/include/pass_thru_plugin.h | 41 ++ .../encryption_plugins/src/include/util.h | 18 + .../src/plugins/delegated_plugin.cc | 36 ++ .../src/plugins/local_plugin.cc | 366 +++++++++++++++++ .../src/plugins/nvflare_plugin.cc | 297 ++++++++++++++ .../src/plugins/pass_thru_plugin.cc | 130 ++++++ .../src/plugins/plugin_main.cc | 184 +++++++++ 
.../encryption_plugins/src/plugins/util.cc | 99 +++++ .../encryption_plugins/tests/CMakeLists.txt | 14 + .../tests/test_dam.cc | 27 +- .../tests/test_main.cc | 0 .../tests/test_tenseal.py | 0 integration/xgboost/processor/CMakeLists.txt | 46 --- integration/xgboost/processor/README.md | 11 - integration/xgboost/processor/src/dam/dam.cc | 146 ------- .../xgboost/processor/src/include/dam.h | 93 ----- .../src/nvflare-plugin/nvflare_processor.cc | 378 ------------------ .../xgboost/processor/tests/CMakeLists.txt | 14 - .../xgboost/histogram_based_v2/defs.py | 12 +- .../proto/federated_pb2.pyi | 20 +- .../proto/federated_pb2_grpc.py | 5 +- .../runners/xgb_client_runner.py | 53 ++- .../runners/xgb_server_runner.py | 2 +- .../histogram_based_v2/sec/client_handler.py | 37 +- .../histogram_based_v2/sec/server_handler.py | 11 + .../histogram_based_v2/secure_data_loader.py | 50 +++ 38 files changed, 2209 insertions(+), 758 deletions(-) create mode 100644 integration/xgboost/encryption_plugins/.editorconfig create mode 100644 integration/xgboost/encryption_plugins/CMakeLists.txt create mode 100644 integration/xgboost/encryption_plugins/README.md rename integration/xgboost/{processor => encryption_plugins}/src/README.md (100%) rename integration/xgboost/{processor => encryption_plugins}/src/dam/README.md (100%) create mode 100644 integration/xgboost/encryption_plugins/src/dam/dam.cc create mode 100644 integration/xgboost/encryption_plugins/src/include/base_plugin.h create mode 100644 integration/xgboost/encryption_plugins/src/include/dam.h create mode 100644 integration/xgboost/encryption_plugins/src/include/data_set_ids.h create mode 100644 integration/xgboost/encryption_plugins/src/include/delegated_plugin.h create mode 100644 integration/xgboost/encryption_plugins/src/include/local_plugin.h rename integration/xgboost/{processor/src/include/nvflare_processor.h => encryption_plugins/src/include/nvflare_plugin.h} (77%) create mode 100644 integration/xgboost/encryption_plugins/src/include/pass_thru_plugin.h create mode 100644 integration/xgboost/encryption_plugins/src/include/util.h create mode 100644 integration/xgboost/encryption_plugins/src/plugins/delegated_plugin.cc create mode 100644 integration/xgboost/encryption_plugins/src/plugins/local_plugin.cc create mode 100644 integration/xgboost/encryption_plugins/src/plugins/nvflare_plugin.cc create mode 100644 integration/xgboost/encryption_plugins/src/plugins/pass_thru_plugin.cc create mode 100644 integration/xgboost/encryption_plugins/src/plugins/plugin_main.cc create mode 100644 integration/xgboost/encryption_plugins/src/plugins/util.cc create mode 100644 integration/xgboost/encryption_plugins/tests/CMakeLists.txt rename integration/xgboost/{processor => encryption_plugins}/tests/test_dam.cc (65%) rename integration/xgboost/{processor => encryption_plugins}/tests/test_main.cc (100%) rename integration/xgboost/{processor => encryption_plugins}/tests/test_tenseal.py (100%) delete mode 100644 integration/xgboost/processor/CMakeLists.txt delete mode 100644 integration/xgboost/processor/README.md delete mode 100644 integration/xgboost/processor/src/dam/dam.cc delete mode 100644 integration/xgboost/processor/src/include/dam.h delete mode 100644 integration/xgboost/processor/src/nvflare-plugin/nvflare_processor.cc delete mode 100644 integration/xgboost/processor/tests/CMakeLists.txt create mode 100644 nvflare/app_opt/xgboost/histogram_based_v2/secure_data_loader.py diff --git a/integration/xgboost/encryption_plugins/.editorconfig 
b/integration/xgboost/encryption_plugins/.editorconfig new file mode 100644 index 0000000000..97a7bc133a --- /dev/null +++ b/integration/xgboost/encryption_plugins/.editorconfig @@ -0,0 +1,11 @@ +root = true + +[*] +charset=utf-8 +indent_style = space +indent_size = 2 +insert_final_newline = true + +[*.py] +indent_style = space +indent_size = 4 diff --git a/integration/xgboost/encryption_plugins/CMakeLists.txt b/integration/xgboost/encryption_plugins/CMakeLists.txt new file mode 100644 index 0000000000..f5d71dd61c --- /dev/null +++ b/integration/xgboost/encryption_plugins/CMakeLists.txt @@ -0,0 +1,41 @@ +cmake_minimum_required(VERSION 3.19) +project(xgb_nvflare LANGUAGES CXX C VERSION 1.0) +set(CMAKE_CXX_STANDARD 17) +set(CMAKE_BUILD_TYPE Debug) + +option(GOOGLE_TEST "Build google tests" OFF) + +file(GLOB_RECURSE LIB_SRC "src/*.cc") + +add_library(nvflare SHARED ${LIB_SRC}) +set_target_properties(nvflare PROPERTIES + CXX_STANDARD 17 + CXX_STANDARD_REQUIRED ON + POSITION_INDEPENDENT_CODE ON + ENABLE_EXPORTS ON +) +target_include_directories(nvflare PRIVATE ${xgb_nvflare_SOURCE_DIR}/src/include) + +if (APPLE) + add_link_options("LINKER:-object_path_lto,$_lto.o") + add_link_options("LINKER:-cache_path_lto,${CMAKE_BINARY_DIR}/LTOCache") +endif () + +#-- Unit Tests +if(GOOGLE_TEST) + find_package(GTest REQUIRED) + enable_testing() + add_executable(nvflare_test) + target_link_libraries(nvflare_test PRIVATE nvflare) + + + target_include_directories(nvflare_test PRIVATE ${xgb_nvflare_SOURCE_DIR}/src/include) + + add_subdirectory(${xgb_nvflare_SOURCE_DIR}/tests) + + add_test( + NAME TestNvflarePlugins + COMMAND nvflare_test + WORKING_DIRECTORY ${xgb_nvflare_BINARY_DIR}) + +endif() diff --git a/integration/xgboost/encryption_plugins/README.md b/integration/xgboost/encryption_plugins/README.md new file mode 100644 index 0000000000..57f2c4621e --- /dev/null +++ b/integration/xgboost/encryption_plugins/README.md @@ -0,0 +1,9 @@ +# Build Instruction + +cd NVFlare/integration/xgboost/encryption_plugins +mkdir build +cd build +cmake .. +make + +The library is libxgb_nvflare.so diff --git a/integration/xgboost/processor/src/README.md b/integration/xgboost/encryption_plugins/src/README.md similarity index 100% rename from integration/xgboost/processor/src/README.md rename to integration/xgboost/encryption_plugins/src/README.md diff --git a/integration/xgboost/processor/src/dam/README.md b/integration/xgboost/encryption_plugins/src/dam/README.md similarity index 100% rename from integration/xgboost/processor/src/dam/README.md rename to integration/xgboost/encryption_plugins/src/dam/README.md diff --git a/integration/xgboost/encryption_plugins/src/dam/dam.cc b/integration/xgboost/encryption_plugins/src/dam/dam.cc new file mode 100644 index 0000000000..9fdb7d8582 --- /dev/null +++ b/integration/xgboost/encryption_plugins/src/dam/dam.cc @@ -0,0 +1,274 @@ +/** + * Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include +#include +#include "dam.h" + + +void print_hex(const uint8_t *buffer, std::size_t size) { + std::cout << std::hex; + for (int i = 0; i < size; i++) { + int c = buffer[i]; + std::cout << c << " "; + } + std::cout << std::endl << std::dec; +} + +void print_buffer(const uint8_t *buffer, std::size_t size) { + if (size <= 64) { + std::cout << "Whole buffer: " << size << " bytes" << std::endl; + print_hex(buffer, size); + return; + } + + std::cout << "First chunk, Total: " << size << " bytes" << std::endl; + print_hex(buffer, 32); + std::cout << "Last chunk, Offset: " << size-16 << " bytes" << std::endl; + print_hex(buffer+size-32, 32); +} + +size_t align(const size_t length) { + return ((length + 7)/8)*8; +} + +// DamEncoder ====== +void DamEncoder::AddBuffer(const Buffer &buffer) { + if (debug_) { + std::cout << "AddBuffer called, size: " << buffer.buf_size << std::endl; + } + if (encoded_) { + std::cout << "Buffer is already encoded" << std::endl; + return; + } + // print_buffer(buffer, buf_size); + entries_.emplace_back(kDataTypeBuffer, static_cast(buffer.buffer), buffer.buf_size); +} + +void DamEncoder::AddFloatArray(const std::vector &value) { + if (debug_) { + std::cout << "AddFloatArray called, size: " << value.size() << std::endl; + } + + if (encoded_) { + std::cout << "Buffer is already encoded" << std::endl; + return; + } + // print_buffer(reinterpret_cast(value.data()), value.size() * 8); + entries_.emplace_back(kDataTypeFloatArray, reinterpret_cast(value.data()), value.size()); +} + +void DamEncoder::AddIntArray(const std::vector &value) { + if (debug_) { + std::cout << "AddIntArray called, size: " << value.size() << std::endl; + } + + if (encoded_) { + std::cout << "Buffer is already encoded" << std::endl; + return; + } + // print_buffer(buffer, buf_size); + entries_.emplace_back(kDataTypeIntArray, reinterpret_cast(value.data()), value.size()); +} + +void DamEncoder::AddBufferArray(const std::vector &value) { + if (debug_) { + std::cout << "AddBufferArray called, size: " << value.size() << std::endl; + } + + if (encoded_) { + std::cout << "Buffer is already encoded" << std::endl; + return; + } + size_t size = 0; + for (auto &buf: value) { + size += buf.buf_size; + } + size += 8*value.size(); + entries_.emplace_back(kDataTypeBufferArray, reinterpret_cast(&value), size); +} + + +std::uint8_t * DamEncoder::Finish(size_t &size) { + encoded_ = true; + + size = CalculateSize(); + auto buf = static_cast(calloc(size, 1)); + auto pointer = buf; + auto sig = local_version_ ? 
kSignatureLocal : kSignature; + memcpy(pointer, sig, strlen(sig)); + memcpy(pointer+8, &size, 8); + memcpy(pointer+16, &data_set_id_, 8); + + pointer += kPrefixLen; + for (auto& entry : entries_) { + std::size_t len; + if (entry.data_type == kDataTypeBufferArray) { + auto buffers = reinterpret_cast *>(entry.pointer); + memcpy(pointer, &entry.data_type, 8); + pointer += 8; + auto array_size = static_cast(buffers->size()); + memcpy(pointer, &array_size, 8); + pointer += 8; + auto sizes = reinterpret_cast(pointer); + for (auto &item : *buffers) { + *sizes = static_cast(item.buf_size); + sizes++; + } + len = 8*buffers->size(); + auto buf_ptr = pointer + len; + for (auto &item : *buffers) { + if (item.buf_size > 0) { + memcpy(buf_ptr, item.buffer, item.buf_size); + } + buf_ptr += item.buf_size; + len += item.buf_size; + } + } else { + memcpy(pointer, &entry.data_type, 8); + pointer += 8; + memcpy(pointer, &entry.size, 8); + pointer += 8; + len = entry.size * entry.ItemSize(); + if (len) { + memcpy(pointer, entry.pointer, len); + } + } + pointer += align(len); + } + + if ((pointer - buf) != size) { + std::cout << "Invalid encoded size: " << (pointer - buf) << std::endl; + return nullptr; + } + + return buf; +} + +std::size_t DamEncoder::CalculateSize() { + std::size_t size = kPrefixLen; + + for (auto& entry : entries_) { + size += 16; // The Type and Len + auto len = entry.size * entry.ItemSize(); + size += align(len); + } + + return size; +} + + +// DamDecoder ====== + +DamDecoder::DamDecoder(std::uint8_t *buffer, std::size_t size, bool local_version, bool debug) { + local_version_ = local_version; + buffer_ = buffer; + buf_size_ = size; + pos_ = buffer + kPrefixLen; + debug_ = debug; + + if (size >= kPrefixLen) { + memcpy(&len_, buffer + 8, 8); + memcpy(&data_set_id_, buffer + 16, 8); + } else { + len_ = 0; + data_set_id_ = 0; + } +} + +bool DamDecoder::IsValid() const { + auto sig = local_version_ ? 
kSignatureLocal : kSignature; + return buf_size_ >= kPrefixLen && memcmp(buffer_, sig, strlen(sig)) == 0; +} + +Buffer DamDecoder::DecodeBuffer() { + auto type = *reinterpret_cast(pos_); + if (type != kDataTypeBuffer) { + std::cout << "Data type " << type << " doesn't match bytes" << std::endl; + return {}; + } + pos_ += 8; + + auto size = *reinterpret_cast(pos_); + pos_ += 8; + + if (size == 0) { + return {}; + } + + auto ptr = reinterpret_cast(pos_); + pos_ += align(size); + return{ ptr, static_cast(size)}; +} + +std::vector DamDecoder::DecodeIntArray() { + auto type = *reinterpret_cast(pos_); + if (type != kDataTypeIntArray) { + std::cout << "Data type " << type << " doesn't match Int Array" << std::endl; + return {}; + } + pos_ += 8; + + auto array_size = *reinterpret_cast(pos_); + pos_ += 8; + auto ptr = reinterpret_cast(pos_); + pos_ += align(8 * array_size); + return {ptr, ptr + array_size}; +} + +std::vector DamDecoder::DecodeFloatArray() { + auto type = *reinterpret_cast(pos_); + if (type != kDataTypeFloatArray) { + std::cout << "Data type " << type << " doesn't match Float Array" << std::endl; + return {}; + } + pos_ += 8; + + auto array_size = *reinterpret_cast(pos_); + pos_ += 8; + + auto ptr = reinterpret_cast(pos_); + pos_ += align(8 * array_size); + return {ptr, ptr + array_size}; +} + +std::vector DamDecoder::DecodeBufferArray() { + auto type = *reinterpret_cast(pos_); + if (type != kDataTypeBufferArray) { + std::cout << "Data type " << type << " doesn't match Bytes Array" << std::endl; + return {}; + } + pos_ += 8; + + auto num = *reinterpret_cast(pos_); + pos_ += 8; + + auto size_ptr = reinterpret_cast(pos_); + auto buf_ptr = pos_ + 8 * num; + size_t total_size = 8 * num; + auto result = std::vector(num); + for (int i = 0; i < num; i++) { + auto size = size_ptr[i]; + if (buf_size_ > 0) { + result[i].buf_size = size; + result[i].buffer = buf_ptr; + buf_ptr += size; + } + total_size += size; + } + + pos_ += align(total_size); + return result; +} diff --git a/integration/xgboost/encryption_plugins/src/include/base_plugin.h b/integration/xgboost/encryption_plugins/src/include/base_plugin.h new file mode 100644 index 0000000000..dddd5a7911 --- /dev/null +++ b/integration/xgboost/encryption_plugins/src/include/base_plugin.h @@ -0,0 +1,155 @@ +/** + * Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#pragma once + +#include // for uint8_t, uint32_t, int32_t, int64_t +#include // for string_view +#include // for pair +#include // for vector +#include +#include +#include + +#include "util.h" + +namespace nvflare { + +/** + * @brief Abstract interface for the encryption plugin + * + * All plugin implementations must inherit this class. + */ +class BasePlugin { +protected: + bool debug_ = false; + bool print_timing_ = false; + bool dam_debug_ = false; + +public: +/** + * @brief Constructor + * + * All inherited classes should call this constructor. 
+ * + * @param args Entries from federated_plugin in communicator environments. + */ + explicit BasePlugin( + std::vector> const &args) { + debug_ = get_bool(args, "debug"); + print_timing_ = get_bool(args, "print_timing"); + dam_debug_ = get_bool(args, "dam_debug"); + } + + /** + * @brief Destructor + */ + virtual ~BasePlugin() = default; + + /** + * @brief Identity for the plugin used for debug + * + * This is a string with instance address and process id. + */ + std::string Ident() { + std::stringstream ss; + ss << std::hex << std::uppercase << std::setw(sizeof(void*) * 2) << std::setfill('0') << + reinterpret_cast(this); + return ss.str() + "-" + std::to_string(getpid()); + } + + /** + * @brief Encrypt the gradient pairs + * + * @param in_gpair Input g and h pairs for each record + * @param n_in The array size (2xnum_of_records) + * @param out_gpair Pointer to encrypted buffer + * @param n_out Encrypted buffer size + */ + virtual void EncryptGPairs(float const *in_gpair, std::size_t n_in, + std::uint8_t **out_gpair, std::size_t *n_out) = 0; + + /** + * @brief Process encrypted gradient pairs + * + * @param in_gpair Encrypted gradient pairs + * @param n_bytes Buffer size of Encrypted gradient + * @param out_gpair Pointer to decrypted gradient pairs + * @param out_n_bytes Decrypted buffer size + */ + virtual void SyncEncryptedGPairs(std::uint8_t const *in_gpair, std::size_t n_bytes, + std::uint8_t const **out_gpair, + std::size_t *out_n_bytes) = 0; + + /** + * @brief Reset the histogram context + * + * @param cutptrs Cut-pointers for the flattened histograms + * @param cutptr_len cutptrs array size (number of features plus one) + * @param bin_idx An array (flattened matrix) of slot index for each record/feature + * @param n_idx The size of above array + */ + virtual void ResetHistContext(std::uint32_t const *cutptrs, std::size_t cutptr_len, + std::int32_t const *bin_idx, std::size_t n_idx) = 0; + + /** + * @brief Encrypt histograms for horizontal training + * + * @param in_histogram The array for the histogram + * @param len The array size + * @param out_hist Pointer to encrypted buffer + * @param out_len Encrypted buffer size + */ + virtual void BuildEncryptedHistHori(double const *in_histogram, std::size_t len, + std::uint8_t **out_hist, std::size_t *out_len) = 0; + + /** + * @brief Process encrypted histograms for horizontal training + * + * @param buffer Buffer for encrypted histograms + * @param len Buffer size of encrypted histograms + * @param out_hist Pointer to decrypted histograms + * @param out_len Size of above array + */ + virtual void SyncEncryptedHistHori(std::uint8_t const *buffer, std::size_t len, + double **out_hist, std::size_t *out_len) = 0; + + /** + * @brief Build histograms in encrypted space for vertical training + * + * @param ridx Pointer to a matrix of row IDs for each node + * @param sizes An array of sizes of each node + * @param nidx An array for each node ID + * @param len Number of nodes + * @param out_hist Pointer to encrypted histogram buffer + * @param out_len Buffer size + */ + virtual void BuildEncryptedHistVert(std::uint64_t const **ridx, + std::size_t const *sizes, + std::int32_t const *nidx, std::size_t len, + std::uint8_t **out_hist, std::size_t *out_len) = 0; + + /** + * @brief Decrypt histogram for vertical training + * + * @param hist_buffer Encrypted histogram buffer + * @param len Buffer size of encrypted histogram + * @param out Pointer to decrypted histograms + * @param out_len Size of above array + */ + virtual void 
SyncEncryptedHistVert(std::uint8_t *hist_buffer, std::size_t len, + double **out, std::size_t *out_len) = 0; +}; +} // namespace nvflare diff --git a/integration/xgboost/encryption_plugins/src/include/dam.h b/integration/xgboost/encryption_plugins/src/include/dam.h new file mode 100644 index 0000000000..8677a413b1 --- /dev/null +++ b/integration/xgboost/encryption_plugins/src/include/dam.h @@ -0,0 +1,143 @@ +/** + * Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#pragma once +#include +#include + +constexpr char kSignature[] = "NVDADAM1"; // DAM (Direct Accessible Marshalling) V1 +constexpr char kSignatureLocal[] = "NVDADAML"; // DAM Local version +constexpr int kPrefixLen = 24; + +constexpr int kDataTypeInt = 1; +constexpr int kDataTypeFloat = 2; +constexpr int kDataTypeString = 3; +constexpr int kDataTypeBuffer = 4; +constexpr int kDataTypeIntArray = 257; +constexpr int kDataTypeFloatArray = 258; +constexpr int kDataTypeBufferArray = 259; +constexpr int kDataTypeMap = 1025; + +/*! \brief A replacement for std::span */ +class Buffer { +public: + void *buffer; + size_t buf_size; + bool allocated; + + Buffer() : buffer(nullptr), buf_size(0), allocated(false) { + } + + Buffer(void *buffer, size_t buf_size, bool allocated=false) : + buffer(buffer), buf_size(buf_size), allocated(allocated) { + } + + Buffer(const Buffer &that): + buffer(that.buffer), buf_size(that.buf_size), allocated(false) { + } +}; + +class Entry { + public: + int64_t data_type; + const uint8_t * pointer; + int64_t size; + + Entry(int64_t data_type, const uint8_t *pointer, int64_t size) { + this->data_type = data_type; + this->pointer = pointer; + this->size = size; + } + + [[nodiscard]] std::size_t ItemSize() const + { + size_t item_size; + switch (data_type) { + case kDataTypeBuffer: + case kDataTypeString: + case kDataTypeBufferArray: + item_size = 1; + break; + default: + item_size = 8; + } + return item_size; + } +}; + +class DamEncoder { + private: + bool encoded_ = false; + bool local_version_ = false; + bool debug_ = false; + int64_t data_set_id_; + std::vector entries_; + + public: + explicit DamEncoder(int64_t data_set_id, bool local_version=false, bool debug=false) { + data_set_id_ = data_set_id; + local_version_ = local_version; + debug_ = debug; + + } + + void AddBuffer(const Buffer &buffer); + + void AddIntArray(const std::vector &value); + + void AddFloatArray(const std::vector &value); + + void AddBufferArray(const std::vector &value); + + std::uint8_t * Finish(size_t &size); + + private: + std::size_t CalculateSize(); +}; + +class DamDecoder { + private: + bool local_version_ = false; + std::uint8_t *buffer_ = nullptr; + std::size_t buf_size_ = 0; + std::uint8_t *pos_ = nullptr; + std::size_t remaining_ = 0; + int64_t data_set_id_ = 0; + int64_t len_ = 0; + bool debug_ = false; + + public: + explicit DamDecoder(std::uint8_t *buffer, std::size_t size, bool local_version=false, bool debug=false); + + [[nodiscard]] std::size_t Size() 
const { + return len_; + } + + [[nodiscard]] int64_t GetDataSetId() const { + return data_set_id_; + } + + [[nodiscard]] bool IsValid() const; + + Buffer DecodeBuffer(); + + std::vector DecodeIntArray(); + + std::vector DecodeFloatArray(); + + std::vector DecodeBufferArray(); +}; + +void print_buffer(const uint8_t *buffer, std::size_t size); diff --git a/integration/xgboost/encryption_plugins/src/include/data_set_ids.h b/integration/xgboost/encryption_plugins/src/include/data_set_ids.h new file mode 100644 index 0000000000..98eb20e838 --- /dev/null +++ b/integration/xgboost/encryption_plugins/src/include/data_set_ids.h @@ -0,0 +1,23 @@ +/** + * Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#pragma once + +constexpr int kDataSetGHPairs = 1; +constexpr int kDataSetAggregation = 2; +constexpr int kDataSetAggregationWithFeatures = 3; +constexpr int kDataSetAggregationResult = 4; +constexpr int kDataSetHistograms = 5; +constexpr int kDataSetHistogramResult = 6; diff --git a/integration/xgboost/encryption_plugins/src/include/delegated_plugin.h b/integration/xgboost/encryption_plugins/src/include/delegated_plugin.h new file mode 100644 index 0000000000..7b4f353b21 --- /dev/null +++ b/integration/xgboost/encryption_plugins/src/include/delegated_plugin.h @@ -0,0 +1,66 @@ +/** + * Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
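For orientation (an illustrative sketch, not code from this patch set): the DAM layout declared above is small enough to reproduce in Python. The encoder below assumes a little-endian host, matching the raw memcpy layout in dam.cc on x86; the constants (8-byte signature, 8-byte total length, 8-byte data-set id, then per entry an 8-byte type, an 8-byte count and an 8-byte-aligned payload) come from dam.h.

import struct

K_SIGNATURE = b"NVDADAM1"   # kSignature
K_PREFIX_LEN = 24           # signature + total length + data set id
K_INT_ARRAY = 257           # kDataTypeIntArray

def dam_encode_int_array(data_set_id, values):
    # Illustrative counterpart of DamEncoder::AddIntArray plus Finish for a single entry.
    payload = b"".join(struct.pack("<q", v) for v in values)
    payload += b"\x00" * (-len(payload) % 8)   # align() pads each entry to 8 bytes
    entry = struct.pack("<qq", K_INT_ARRAY, len(values)) + payload
    total = K_PREFIX_LEN + len(entry)
    return K_SIGNATURE + struct.pack("<qq", total, data_set_id) + entry

buf = dam_encode_int_array(5, [1, 2, 3])       # 5 == kDataSetHistograms
assert buf[8:16] == struct.pack("<q", len(buf))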
+ */ +#pragma once +#include "base_plugin.h" + +namespace nvflare { + +// Plugin that delegates to other real plugins +class DelegatedPlugin : public BasePlugin { + + BasePlugin *plugin_{nullptr}; + +public: + explicit DelegatedPlugin(std::vector> const &args); + + ~DelegatedPlugin() override { + delete plugin_; + } + + void EncryptGPairs(const float* in_gpair, std::size_t n_in, std::uint8_t** out_gpair, std::size_t* n_out) override { + plugin_->EncryptGPairs(in_gpair, n_in, out_gpair, n_out); + } + + void SyncEncryptedGPairs(const std::uint8_t* in_gpair, std::size_t n_bytes, const std::uint8_t** out_gpair, + std::size_t* out_n_bytes) override { + plugin_->SyncEncryptedGPairs(in_gpair, n_bytes, out_gpair, out_n_bytes); + } + + void ResetHistContext(const std::uint32_t* cutptrs, std::size_t cutptr_len, const std::int32_t* bin_idx, + std::size_t n_idx) override { + plugin_->ResetHistContext(cutptrs, cutptr_len, bin_idx, n_idx); + } + + void BuildEncryptedHistHori(const double* in_histogram, std::size_t len, std::uint8_t** out_hist, + std::size_t* out_len) override { + plugin_->BuildEncryptedHistHori(in_histogram, len, out_hist, out_len); + } + + void SyncEncryptedHistHori(const std::uint8_t* buffer, std::size_t len, double** out_hist, + std::size_t* out_len) override { + plugin_->SyncEncryptedHistHori(buffer, len, out_hist, out_len); + } + + void BuildEncryptedHistVert(const std::uint64_t** ridx, const std::size_t* sizes, const std::int32_t* nidx, + std::size_t len, std::uint8_t** out_hist, std::size_t* out_len) override { + plugin_->BuildEncryptedHistVert(ridx, sizes, nidx, len, out_hist, out_len); + } + + void SyncEncryptedHistVert(std::uint8_t* hist_buffer, std::size_t len, double** out, std::size_t* out_len) override { + plugin_->SyncEncryptedHistVert(hist_buffer, len, out, out_len); + } +}; +} // namespace nvflare diff --git a/integration/xgboost/encryption_plugins/src/include/local_plugin.h b/integration/xgboost/encryption_plugins/src/include/local_plugin.h new file mode 100644 index 0000000000..2022322266 --- /dev/null +++ b/integration/xgboost/encryption_plugins/src/include/local_plugin.h @@ -0,0 +1,107 @@ +/** + * Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#pragma once + +#include "base_plugin.h" +#include "dam.h" + +namespace nvflare { + +// A base plugin for all plugins that handle encryption locally in C++ +class LocalPlugin : public BasePlugin { +protected: + std::vector gh_pairs_; + std::vector encrypted_gh_; + std::vector histo_; + std::vector cuts_; + std::vector slots_; + std::vector buffer_; + +public: + explicit LocalPlugin(std::vector> const &args) : + BasePlugin(args) {} + + ~LocalPlugin() override = default; + + void EncryptGPairs(const float *in_gpair, std::size_t n_in, std::uint8_t **out_gpair, + std::size_t *n_out) override; + + void SyncEncryptedGPairs(const std::uint8_t *in_gpair, std::size_t n_bytes, const std::uint8_t **out_gpair, + std::size_t *out_n_bytes) override; + + void ResetHistContext(const std::uint32_t *cutptrs, std::size_t cutptr_len, const std::int32_t *bin_idx, + std::size_t n_idx) override; + + void BuildEncryptedHistHori(const double *in_histogram, std::size_t len, std::uint8_t **out_hist, + std::size_t *out_len) override; + + void SyncEncryptedHistHori(const std::uint8_t *buffer, std::size_t len, double **out_hist, + std::size_t *out_len) override; + + void BuildEncryptedHistVert(const std::uint64_t **ridx, const std::size_t *sizes, const std::int32_t *nidx, + std::size_t len, std::uint8_t **out_hist, std::size_t *out_len) override; + + void SyncEncryptedHistVert(std::uint8_t *hist_buffer, std::size_t len, double **out, + std::size_t *out_len) override; + + // Method needs to be implemented by local plugins + + /*! + * \brief Encrypt a vector of float-pointing numbers + * \param cleartext A vector of numbers in cleartext + * \return A buffer with serialized ciphertext + */ + virtual Buffer EncryptVector(const std::vector &cleartext) = 0; + + /*! + * \brief Decrypt a serialized ciphertext into an array of numbers + * \param ciphertext A serialzied buffer of ciphertext + * \return An array of numbers + */ + virtual std::vector DecryptVector(const std::vector &ciphertext) = 0; + + /*! + * \brief Add the G&H pairs for a series of samples + * \param sample_ids A map of slot number and an array of sample IDs + * \return A map of the serialized encrypted sum of G and H for each slot + * The input and output maps must have the same size + */ + virtual std::map AddGHPairs(const std::map> &sample_ids) = 0; + + /*! 
+ * \brief Free encrypted data buffer + * \param ciphertext The buffer for encrypted data + */ + virtual void FreeEncryptedData(Buffer &ciphertext) { + if (ciphertext.allocated && ciphertext.buffer != nullptr) { + free(ciphertext.buffer); + ciphertext.allocated = false; + } + ciphertext.buffer = nullptr; + ciphertext.buf_size = 0; + }; + +private: + + void BuildEncryptedHistVertActive(const std::uint64_t **ridx, const std::size_t *sizes, const std::int32_t *nidx, + std::size_t len, std::uint8_t **out_hist, std::size_t *out_len); + + void BuildEncryptedHistVertPassive(const std::uint64_t **ridx, const std::size_t *sizes, const std::int32_t *nidx, + std::size_t len, std::uint8_t **out_hist, std::size_t *out_len); + +}; + +} // namespace nvflare diff --git a/integration/xgboost/processor/src/include/nvflare_processor.h b/integration/xgboost/encryption_plugins/src/include/nvflare_plugin.h similarity index 77% rename from integration/xgboost/processor/src/include/nvflare_processor.h rename to integration/xgboost/encryption_plugins/src/include/nvflare_plugin.h index cb7076eaf4..87f47d622c 100644 --- a/integration/xgboost/processor/src/include/nvflare_processor.h +++ b/integration/xgboost/encryption_plugins/src/include/nvflare_plugin.h @@ -14,61 +14,65 @@ * limitations under the License. */ #pragma once + #include // for uint8_t, uint32_t, int32_t, int64_t #include // for string_view #include // for pair #include // for vector -const int kDataSetHGPairs = 1; -const int kDataSetAggregation = 2; -const int kDataSetAggregationWithFeatures = 3; -const int kDataSetAggregationResult = 4; -const int kDataSetHistograms = 5; -const int kDataSetHistogramResult = 6; - -// Opaque pointer type for the C API. -typedef void *FederatedPluginHandle; // NOLINT +#include "base_plugin.h" namespace nvflare { -// Plugin that uses Python tenseal and GRPC. -class TensealPlugin { + +// Plugin that uses Python TenSeal and GRPC. +class NvflarePlugin : public BasePlugin { // Buffer for storing encrypted gradient pairs. std::vector encrypted_gpairs_; // Buffer for histogram cut pointers (indptr of a CSC). std::vector cut_ptrs_; // Buffer for histogram index. std::vector bin_idx_; + std::vector gh_pairs_; bool feature_sent_{false}; // The feature index. std::vector features_; // Buffer for output histogram. 
std::vector encrypted_hist_; - std::vector hist_; + // A temporary buffer to hold return value + std::vector buffer_; + // Buffer for clear histogram + std::vector histo_; public: - TensealPlugin( - std::vector> const &args); + explicit NvflarePlugin(std::vector> const &args) : BasePlugin(args) {} + + ~NvflarePlugin() override = default; + // Gradient pairs void EncryptGPairs(float const *in_gpair, std::size_t n_in, - std::uint8_t **out_gpair, std::size_t *n_out); + std::uint8_t **out_gpair, std::size_t *n_out) override; + void SyncEncryptedGPairs(std::uint8_t const *in_gpair, std::size_t n_bytes, std::uint8_t const **out_gpair, - std::size_t *out_n_bytes); + std::size_t *out_n_bytes) override; // Histogram void ResetHistContext(std::uint32_t const *cutptrs, std::size_t cutptr_len, - std::int32_t const *bin_idx, std::size_t n_idx); + std::int32_t const *bin_idx, std::size_t n_idx) override; + void BuildEncryptedHistHori(double const *in_histogram, std::size_t len, - std::uint8_t **out_hist, std::size_t *out_len); + std::uint8_t **out_hist, std::size_t *out_len) override; + void SyncEncryptedHistHori(std::uint8_t const *buffer, std::size_t len, - double **out_hist, std::size_t *out_len); + double **out_hist, std::size_t *out_len) override; - void BuildEncryptedHistVert(std::size_t const **ridx, + void BuildEncryptedHistVert(std::uint64_t const **ridx, std::size_t const *sizes, std::int32_t const *nidx, std::size_t len, - std::uint8_t **out_hist, std::size_t *out_len); + std::uint8_t **out_hist, std::size_t *out_len) override; + void SyncEncryptedHistVert(std::uint8_t *hist_buffer, std::size_t len, - double **out, std::size_t *out_len); + double **out, std::size_t *out_len) override; }; } // namespace nvflare diff --git a/integration/xgboost/encryption_plugins/src/include/pass_thru_plugin.h b/integration/xgboost/encryption_plugins/src/include/pass_thru_plugin.h new file mode 100644 index 0000000000..3abeee4b56 --- /dev/null +++ b/integration/xgboost/encryption_plugins/src/include/pass_thru_plugin.h @@ -0,0 +1,41 @@ +/** + * Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#pragma once +#include "local_plugin.h" + +namespace nvflare { + // A pass-through plugin that doesn't encrypt any data + class PassThruPlugin : public LocalPlugin { + public: + explicit PassThruPlugin(std::vector> const &args) : + LocalPlugin(args) {} + + ~PassThruPlugin() override = default; + + // Horizontal in local plugin still goes through NVFlare, so it needs to be overwritten + void BuildEncryptedHistHori(const double *in_histogram, std::size_t len, std::uint8_t **out_hist, + std::size_t *out_len) override; + + void SyncEncryptedHistHori(const std::uint8_t *buffer, std::size_t len, double **out_hist, + std::size_t *out_len) override; + + Buffer EncryptVector(const std::vector &cleartext) override; + + std::vector DecryptVector(const std::vector &ciphertext) override; + + std::map AddGHPairs(const std::map> &sample_ids) override; + }; +} // namespace nvflare diff --git a/integration/xgboost/encryption_plugins/src/include/util.h b/integration/xgboost/encryption_plugins/src/include/util.h new file mode 100644 index 0000000000..bb8ba16d1a --- /dev/null +++ b/integration/xgboost/encryption_plugins/src/include/util.h @@ -0,0 +1,18 @@ +#pragma once +#include +#include + +std::vector> distribute_work(size_t num_jobs, size_t num_workers); + +uint32_t to_int(double d); + +double to_double(uint32_t i); + +std::string get_string(std::vector> const &args, + std::string_view const &key,std::string_view default_value = ""); + +bool get_bool(std::vector> const &args, + const std::string &key, bool default_value = false); + +int get_int(std::vector> const &args, + const std::string &key, int default_value = 0); diff --git a/integration/xgboost/encryption_plugins/src/plugins/delegated_plugin.cc b/integration/xgboost/encryption_plugins/src/plugins/delegated_plugin.cc new file mode 100644 index 0000000000..a026822799 --- /dev/null +++ b/integration/xgboost/encryption_plugins/src/plugins/delegated_plugin.cc @@ -0,0 +1,36 @@ +/** + * Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
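For orientation (an illustrative sketch, not configuration from this patch set): the get_string/get_bool/get_int helpers declared in util.h read simple key/value options handed to the plugin. Only the key names below come from BasePlugin and DelegatedPlugin; the values and the surrounding NVFlare-side configuration format are assumptions.

federated_plugin_args = {
    "name": "nvflare",        # DelegatedPlugin: selects "nvflare" or "pass-thru"
    "debug": "false",         # BasePlugin: verbose per-call logging
    "print_timing": "false",  # BasePlugin: print encryption/aggregation timings
    "dam_debug": "false",     # BasePlugin: debug output for DAM encode/decode
}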
+ */ +#include "delegated_plugin.h" +#include "pass_thru_plugin.h" +#include "nvflare_plugin.h" + +namespace nvflare { + +DelegatedPlugin::DelegatedPlugin(std::vector> const &args): + BasePlugin(args) { + + auto name = get_string(args, "name"); + // std::cout << "==== Name is " << name << std::endl; + if (name == "pass-thru") { + plugin_ = new PassThruPlugin(args); + } else if (name == "nvflare") { + plugin_ = new NvflarePlugin(args); + } else { + throw std::invalid_argument{"Unknown plugin name: " + name}; + } +} + +} // namespace nvflare diff --git a/integration/xgboost/encryption_plugins/src/plugins/local_plugin.cc b/integration/xgboost/encryption_plugins/src/plugins/local_plugin.cc new file mode 100644 index 0000000000..99e304ea77 --- /dev/null +++ b/integration/xgboost/encryption_plugins/src/plugins/local_plugin.cc @@ -0,0 +1,366 @@ +/** + * Copyright 2014-2024 by XGBoost Contributors + */ +#include +#include +#include +#include "local_plugin.h" +#include "data_set_ids.h" + +namespace nvflare { + +void LocalPlugin::EncryptGPairs(const float *in_gpair, std::size_t n_in, std::uint8_t **out_gpair, std::size_t *n_out) { + if (debug_) { + std::cout << Ident() << " LocalPlugin::EncryptGPairs called with pairs size: " << n_in << std::endl; + } + + if (print_timing_) { + std::cout << "Encrypting " << n_in / 2 << " GH Pairs" << std::endl; + } + auto start = std::chrono::system_clock::now(); + + auto pairs = std::vector(in_gpair, in_gpair + n_in); + auto double_pairs = std::vector(pairs.cbegin(), pairs.cend()); + auto encrypted_data = EncryptVector(double_pairs); + + if (print_timing_) { + auto end = std::chrono::system_clock::now(); + auto secs = static_cast(std::chrono::duration_cast(end - start).count()) / 1000.0; + std::cout << "Encryption time: " << secs << " seconds" << std::endl; + } + + // Serialize with DAM so the buffers can be separated after all-gather + DamEncoder encoder(kDataSetGHPairs, true, dam_debug_); + encoder.AddBuffer(encrypted_data); + + std::size_t size; + auto buffer = encoder.Finish(size); + FreeEncryptedData(encrypted_data); + buffer_.resize(size); + std::copy_n(buffer, size, buffer_.begin()); + free(buffer); + + *out_gpair = buffer_.data(); + *n_out = buffer_.size(); + if (debug_) { + std::cout << "Encrypted GPairs:" << std::endl; + print_buffer(*out_gpair, *n_out); + } + + // Save pairs for future operations. 
This is only called on active site + gh_pairs_ = std::vector(double_pairs); +} + +void LocalPlugin::SyncEncryptedGPairs(const std::uint8_t *in_gpair, std::size_t n_bytes, + const std::uint8_t **out_gpair, std::size_t *out_n_bytes) { + if (debug_) { + std::cout << Ident() << " LocalPlugin::SyncEncryptedGPairs called with buffer:" << std::endl; + print_buffer(in_gpair, n_bytes); + } + + *out_n_bytes = n_bytes; + *out_gpair = in_gpair; + auto decoder = DamDecoder(const_cast(in_gpair), n_bytes, true, dam_debug_); + if (!decoder.IsValid()) { + std::cout << "LocalPlugin::SyncEncryptedGPairs called with wrong data" << std::endl; + return; + } + + auto encrypted_buffer = decoder.DecodeBuffer(); + if (debug_) { + std::cout << "Encrypted buffer size: " << encrypted_buffer.buf_size << std::endl; + } + + // The caller may free buffer so a copy is needed + auto pointer = static_cast(encrypted_buffer.buffer); + encrypted_gh_ = std::vector(pointer, pointer + encrypted_buffer.buf_size); + FreeEncryptedData(encrypted_buffer); +} + +void LocalPlugin::ResetHistContext(const std::uint32_t *cutptrs, std::size_t cutptr_len, const std::int32_t *bin_idx, + std::size_t n_idx) { + if (debug_) { + std::cout << Ident() << " LocalPlugin::ResetHistContext called with cutptrs size: " << cutptr_len << " bin_idx size: " + << n_idx << std::endl; + } + + cuts_ = std::vector(cutptrs, cutptrs + cutptr_len); + slots_ = std::vector(bin_idx, bin_idx + n_idx); +} + +void LocalPlugin::BuildEncryptedHistHori(const double *in_histogram, std::size_t len, std::uint8_t **out_hist, + std::size_t *out_len) { + if (debug_) { + std::cout << Ident() << " LocalPlugin::BuildEncryptedHistHori called with " << len << " entries" << std::endl; + print_buffer(reinterpret_cast(in_histogram), len); + } + + // don't have a local implementation yet, just encoded it and let NVFlare handle it. + DamEncoder encoder(kDataSetHistograms, false, dam_debug_); + auto histograms = std::vector(in_histogram, in_histogram + len); + encoder.AddFloatArray(histograms); + std::size_t size; + auto buffer = encoder.Finish(size); + buffer_.resize(size); + std::copy_n(buffer, size, buffer_.begin()); + free(buffer); + + *out_hist = buffer_.data(); + *out_len = buffer_.size(); + if (debug_) { + std::cout << "Output buffer" << std::endl; + print_buffer(*out_hist, *out_len); + } +} + +void LocalPlugin::SyncEncryptedHistHori(const std::uint8_t *buffer, std::size_t len, double **out_hist, + std::size_t *out_len) { + if (debug_) { + std::cout << Ident() << " LocalPlugin::SyncEncryptedHistHori called with buffer size: " << len << std::endl; + print_buffer(buffer, len); + } + auto remaining = len; + auto pointer = buffer; + + // The buffer is concatenated by AllGather. 
It may contain multiple DAM buffers + std::vector& result = histo_; + result.clear(); + while (remaining > kPrefixLen) { + DamDecoder decoder(const_cast(pointer), remaining, false, dam_debug_); + if (!decoder.IsValid()) { + std::cout << "Not DAM encoded histogram ignored at offset: " + << static_cast(pointer - buffer) << std::endl; + break; + } + + if (decoder.GetDataSetId() != kDataSetHistogramResult) { + throw std::runtime_error{"Invalid dataset: " + std::to_string(decoder.GetDataSetId())}; + } + + auto size = decoder.Size(); + auto histo = decoder.DecodeFloatArray(); + result.insert(result.end(), histo.cbegin(), histo.cend()); + + remaining -= size; + pointer += size; + } + + *out_hist = result.data(); + *out_len = result.size(); + + if (debug_) { + std::cout << "Output buffer" << std::endl; + print_buffer(reinterpret_cast(*out_hist), histo_.size() * sizeof(double)); + } +} + +void LocalPlugin::BuildEncryptedHistVert(const std::uint64_t **ridx, const std::size_t *sizes, const std::int32_t *nidx, + std::size_t len, std::uint8_t **out_hist, std::size_t *out_len) { + if (debug_) { + std::cout << Ident() << " LocalPlugin::BuildEncryptedHistVert called with number of nodes: " << len << std::endl; + } + + if (gh_pairs_.empty()) { + BuildEncryptedHistVertPassive(ridx, sizes, nidx, len, out_hist, out_len); + } else { + BuildEncryptedHistVertActive(ridx, sizes, nidx, len, out_hist, out_len); + } + + if (debug_) { + std::cout << "Encrypted histogram output:" << std::endl; + print_buffer(*out_hist, *out_len); + } +} + +void LocalPlugin::BuildEncryptedHistVertActive(const std::uint64_t **ridx, const std::size_t *sizes, const std::int32_t *, + std::size_t len, std::uint8_t **out_hist, std::size_t *out_len) { + + if (debug_) { + std::cout << Ident() << " LocalPlugin::BuildEncryptedHistVertActive called with " << len << " nodes" << std::endl; + } + + auto total_bin_size = cuts_.back(); + auto histo_size = total_bin_size * 2; + auto total_size = histo_size * len; + + histo_.clear(); + histo_.resize(total_size); + size_t start = 0; + for (std::size_t i = 0; i < len; i++) { + for (std::size_t j = 0; j < sizes[i]; j++) { + auto row_id = ridx[i][j]; + auto num = cuts_.size() - 1; + for (std::size_t f = 0; f < num; f++) { + int slot = slots_[f + num * row_id]; + if ((slot < 0) || (slot >= total_bin_size)) { + continue; + } + auto g = gh_pairs_[row_id * 2]; + auto h = gh_pairs_[row_id * 2 + 1]; + (histo_)[start + slot * 2] += g; + (histo_)[start + slot * 2 + 1] += h; + } + } + start += histo_size; + } + + // Histogram is in clear, can't send to all_gather. 
Just return empty DAM buffer + auto encoder = DamEncoder(kDataSetAggregationResult, true, dam_debug_); + encoder.AddBuffer(Buffer()); + std::size_t size; + auto buffer = encoder.Finish(size); + buffer_.resize(size); + std::copy_n(buffer, size, buffer_.begin()); + free(buffer); + *out_hist = buffer_.data(); + *out_len = size; +} + +void LocalPlugin::BuildEncryptedHistVertPassive(const std::uint64_t **ridx, const std::size_t *sizes, const std::int32_t *, + std::size_t len, std::uint8_t **out_hist, std::size_t *out_len) { + if (debug_) { + std::cout << Ident() << " LocalPlugin::BuildEncryptedHistVertPassive called with " << len << " nodes" << std::endl; + } + + auto num_slot = cuts_.back(); + auto total_size = num_slot * len; + + auto encrypted_histo = std::vector(total_size); + size_t offset = 0; + for (std::size_t i = 0; i < len; i++) { + auto num = cuts_.size() - 1; + auto row_id_map = std::map>(); + + // Empty slot leaks data so fill everything with empty vectors + for (int slot = 0; slot < num_slot; slot++) { + row_id_map.insert({slot, std::vector()}); + } + + for (std::size_t f = 0; f < num; f++) { + for (std::size_t j = 0; j < sizes[i]; j++) { + auto row_id = ridx[i][j]; + int slot = slots_[f + num * row_id]; + if ((slot < 0) || (slot >= num_slot)) { + continue; + } + auto &row_ids = row_id_map[slot]; + row_ids.push_back(static_cast(row_id)); + } + } + + if (print_timing_) { + std::size_t add_ops = 0; + for (auto &item: row_id_map) { + add_ops += item.second.size(); + } + std::cout << "Aggregating with " << add_ops << " additions" << std::endl; + } + auto start = std::chrono::system_clock::now(); + + auto encrypted_sum = AddGHPairs(row_id_map); + + if (print_timing_) { + auto end = std::chrono::system_clock::now(); + auto secs = static_cast(std::chrono::duration_cast(end - start).count()) / 1000.0; + std::cout << "Aggregation time: " << secs << " seconds" << std::endl; + } + + // Convert map back to array + for (int slot = 0; slot < num_slot; slot++) { + auto it = encrypted_sum.find(slot); + if (it != encrypted_sum.end()) { + encrypted_histo[offset + slot] = it->second; + } + } + + offset += num_slot; + } + + auto encoder = DamEncoder(kDataSetAggregationResult, true, dam_debug_); + encoder.AddBufferArray(encrypted_histo); + std::size_t size; + auto buffer = encoder.Finish(size); + for (auto &item: encrypted_histo) { + FreeEncryptedData(item); + } + buffer_.resize(size); + std::copy_n(buffer, size, buffer_.begin()); + free(buffer); + *out_hist = buffer_.data(); + *out_len = size; +} + +void LocalPlugin::SyncEncryptedHistVert(std::uint8_t *hist_buffer, std::size_t len, + double **out, std::size_t *out_len) { + if (debug_) { + std::cout << Ident() << " LocalPlugin::SyncEncryptedHistVert called with buffer size: " << len << " nodes" << std::endl; + print_buffer(hist_buffer, len); + } + + auto remaining = len; + auto pointer = hist_buffer; + + *out = nullptr; + *out_len = 0; + if (gh_pairs_.empty()) { + if (debug_) { + std::cout << Ident() << " LocalPlugin::SyncEncryptedHistVert Do nothing for passive worker" << std::endl; + } + // Do nothing for passive worker + return; + } + + // The buffer is concatenated by AllGather. 
It may contain multiple DAM buffers + auto first = true; + auto orig_size = histo_.size(); + while (remaining > kPrefixLen) { + DamDecoder decoder(pointer, remaining, true, dam_debug_); + if (!decoder.IsValid()) { + std::cout << "Not DAM encoded buffer ignored at offset: " + << static_cast((pointer - hist_buffer)) << std::endl; + break; + } + auto size = decoder.Size(); + if (first) { + if (histo_.empty()) { + std::cout << "No clear histogram." << std::endl; + return; + } + first = false; + } else { + auto encrypted_buf = decoder.DecodeBufferArray(); + + if (print_timing_) { + std::cout << "Decrypting " << encrypted_buf.size() << " pairs" << std::endl; + } + auto start = std::chrono::system_clock::now(); + + auto decrypted_histo = DecryptVector(encrypted_buf); + + if (print_timing_) { + auto end = std::chrono::system_clock::now(); + auto secs = static_cast(std::chrono::duration_cast(end - start).count()) / 1000.0; + std::cout << "Decryption time: " << secs << " seconds" << std::endl; + } + + if (decrypted_histo.size() != orig_size) { + std::cout << "Histo sizes are different: " << decrypted_histo.size() + << " != " << orig_size << std::endl; + } + histo_.insert(histo_.end(), decrypted_histo.cbegin(), decrypted_histo.cend()); + } + remaining -= size; + pointer += size; + } + + if (debug_) { + std::cout << Ident() << " Decrypted result size: " << histo_.size() << std::endl; + } + + // print_buffer(reinterpret_cast(result.data()), result.size()*8); + + *out = histo_.data(); + *out_len = histo_.size(); +} + +} // namespace nvflare diff --git a/integration/xgboost/encryption_plugins/src/plugins/nvflare_plugin.cc b/integration/xgboost/encryption_plugins/src/plugins/nvflare_plugin.cc new file mode 100644 index 0000000000..b062aecfa6 --- /dev/null +++ b/integration/xgboost/encryption_plugins/src/plugins/nvflare_plugin.cc @@ -0,0 +1,297 @@ +/** + * Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include +#include // for copy_n, transform +#include // for memcpy +#include // for invalid_argument +#include // for vector + +#include "nvflare_plugin.h" +#include "data_set_ids.h" +#include "dam.h" // for DamEncoder + +namespace nvflare { + +void NvflarePlugin::EncryptGPairs(float const *in_gpair, std::size_t n_in, + std::uint8_t **out_gpair, + std::size_t *n_out) { + if (debug_) { + std::cout << Ident() << " NvflarePlugin::EncryptGPairs called with pairs size: " << n_in<< std::endl; + } + + auto pairs = std::vector(in_gpair, in_gpair + n_in); + gh_pairs_ = std::vector(pairs.cbegin(), pairs.cend()); + + DamEncoder encoder(kDataSetGHPairs, false, dam_debug_); + encoder.AddFloatArray(gh_pairs_); + std::size_t size; + auto buffer = encoder.Finish(size); + if (!out_gpair) { + throw std::invalid_argument{"Invalid pointer to output gpair."}; + } + buffer_.resize(size); + std::copy_n(buffer, size, buffer_.begin()); + free(buffer); + *out_gpair = buffer_.data(); + *n_out = size; +} + +void NvflarePlugin::SyncEncryptedGPairs(std::uint8_t const *in_gpair, + std::size_t n_bytes, + std::uint8_t const **out_gpair, + std::size_t *out_n_bytes) { + if (debug_) { + std::cout << Ident() << " NvflarePlugin::SyncEncryptedGPairs called with buffer size: " << n_bytes << std::endl; + } + + // For NVFlare plugin, nothing needs to be done here + *out_n_bytes = n_bytes; + *out_gpair = in_gpair; +} + +void NvflarePlugin::ResetHistContext(std::uint32_t const *cutptrs, + std::size_t cutptr_len, + std::int32_t const *bin_idx, + std::size_t n_idx) { + if (debug_) { + std::cout << Ident() << " NvFlarePlugin::ResetHistContext called with cutptrs size: " << cutptr_len << " bin_idx size: " + << n_idx<< std::endl; + } + + cut_ptrs_.resize(cutptr_len); + std::copy_n(cutptrs, cutptr_len, cut_ptrs_.begin()); + bin_idx_.resize(n_idx); + std::copy_n(bin_idx, n_idx, this->bin_idx_.begin()); +} + +void NvflarePlugin::BuildEncryptedHistVert(std::uint64_t const **ridx, + std::size_t const *sizes, + std::int32_t const *nidx, + std::size_t len, + std::uint8_t** out_hist, + std::size_t* out_len) { + if (debug_) { + std::cout << Ident() << " NvflarePlugin::BuildEncryptedHistVert called with len: " << len << std::endl; + } + + std::int64_t data_set_id; + if (!feature_sent_) { + data_set_id = kDataSetAggregationWithFeatures; + feature_sent_ = true; + } else { + data_set_id = kDataSetAggregation; + } + + DamEncoder encoder(data_set_id, false, dam_debug_); + + // Add cuts pointers + std::vector cuts_vec(cut_ptrs_.cbegin(), cut_ptrs_.cend()); + encoder.AddIntArray(cuts_vec); + + auto num_features = cut_ptrs_.size() - 1; + auto num_samples = bin_idx_.size() / num_features; + if (debug_) { + std::cout << "Samples: " << num_samples << " Features: " << num_features << std::endl; + } + + std::vector bins; + if (data_set_id == kDataSetAggregationWithFeatures) { + if (features_.empty()) { // when is it not empty? + for (int64_t f = 0; f < num_features; f++) { + auto slot = bin_idx_[f]; + if (slot >= 0) { + // what happens if it's missing? 
+ features_.push_back(f); + } + } + } + encoder.AddIntArray(features_); + + for (int i = 0; i < num_samples; i++) { + for (auto f : features_) { + auto index = f + i * num_features; + if (index > bin_idx_.size()) { + throw std::out_of_range{"Index is out of range: " + + std::to_string(index)}; + } + auto slot = bin_idx_[index]; + bins.push_back(slot); + } + } + encoder.AddIntArray(bins); + } + + // Add nodes to build + std::vector node_vec(len); + for (std::size_t i = 0; i < len; i++) { + node_vec[i] = nidx[i]; + } + encoder.AddIntArray(node_vec); + + // For each node, get the row_id/slot pair + auto row_ids = std::vector>(len); + for (std::size_t i = 0; i < len; ++i) { + auto& rows = row_ids[i]; + rows.resize(sizes[i]); + for (std::size_t j = 0; j < sizes[i]; j++) { + rows[j] = static_cast(ridx[i][j]); + } + encoder.AddIntArray(rows); + } + + std::size_t n{0}; + auto buffer = encoder.Finish(n); + if (debug_) { + std::cout << "Finished size: " << n << std::endl; + } + + // XGBoost doesn't allow the change of allgatherV sizes. Make sure it's big + // enough to carry histograms + auto max_slot = cut_ptrs_.back(); + auto histo_size = 2 * max_slot * sizeof(double) * len + 1024*1024; // 1M is DAM overhead + auto buf_size = histo_size > n ? histo_size : n; + + // Copy to an array so the buffer can be freed, should change encoder to return vector + buffer_.resize(buf_size); + std::copy_n(buffer, n, buffer_.begin()); + free(buffer); + + *out_hist = buffer_.data(); + *out_len = buffer_.size(); +} + +void NvflarePlugin::SyncEncryptedHistVert(std::uint8_t *buffer, + std::size_t buf_size, + double **out, + std::size_t *out_len) { + if (debug_) { + std::cout << Ident() << " NvflarePlugin::SyncEncryptedHistVert called with buffer size: " << buf_size << std::endl; + } + + auto remaining = buf_size; + char *pointer = reinterpret_cast(buffer); + + // The buffer is concatenated by AllGather. It may contain multiple DAM buffers + std::vector &result = histo_; + result.clear(); + auto max_slot = cut_ptrs_.back(); + auto array_size = 2 * max_slot * sizeof(double); + + // A new histogram array? + auto slots = static_cast(malloc(array_size)); + while (remaining > kPrefixLen) { + DamDecoder decoder(reinterpret_cast(pointer), remaining, false, dam_debug_); + if (!decoder.IsValid()) { + std::cout << "Not DAM encoded buffer ignored at offset: " + << static_cast((pointer - reinterpret_cast(buffer))) << std::endl; + break; + } + auto size = decoder.Size(); + auto node_list = decoder.DecodeIntArray(); + if (debug_) { + std::cout << "Number of nodes: " << node_list.size() << " Histo size: " << 2*max_slot << std::endl; + } + for ([[maybe_unused]] auto node : node_list) { + std::memset(slots, 0, array_size); + auto feature_list = decoder.DecodeIntArray(); + // Convert per-feature histo to a flat one + for (auto f : feature_list) { + auto base = cut_ptrs_[f]; // cut pointer for the current feature + auto bins = decoder.DecodeFloatArray(); + auto n = bins.size() / 2; + for (int i = 0; i < n; i++) { + auto index = base + i; + // [Q] Build local histogram? Why does it need to be built here? 
+ slots[2 * index] += bins[2 * i]; + slots[2 * index + 1] += bins[2 * i + 1]; + } + } + result.insert(result.end(), slots, slots + 2 * max_slot); + } + remaining -= size; + pointer += size; + } + free(slots); + + // result is a reference to a histo_ + *out_len = result.size(); + *out = result.data(); + if (debug_) { + std::cout << "Total histogram size: " << *out_len << std::endl; + } +} + +void NvflarePlugin::BuildEncryptedHistHori(double const *in_histogram, + std::size_t len, + std::uint8_t **out_hist, + std::size_t *out_len) { + if (debug_) { + std::cout << Ident() << " NvflarePlugin::BuildEncryptedHistHori called with histo size: " << len << std::endl; + } + + DamEncoder encoder(kDataSetHistograms, false, dam_debug_); + std::vector copy(in_histogram, in_histogram + len); + encoder.AddFloatArray(copy); + + std::size_t size{0}; + auto buffer = encoder.Finish(size); + buffer_.resize(size); + std::copy_n(buffer, size, buffer_.begin()); + free(buffer); + + *out_hist = this->buffer_.data(); + *out_len = this->buffer_.size(); +} + +void NvflarePlugin::SyncEncryptedHistHori(std::uint8_t const *buffer, + std::size_t len, + double **out_hist, + std::size_t *out_len) { + if (debug_) { + std::cout << Ident() << " NvflarePlugin::SyncEncryptedHistHori called with buffer size: " << len << std::endl; + } + + auto remaining = len; + auto pointer = buffer; + + // The buffer is concatenated by AllGather. It may contain multiple DAM buffers + std::vector& result = histo_; + result.clear(); + while (remaining > kPrefixLen) { + DamDecoder decoder(const_cast(pointer), remaining, false, dam_debug_); + if (!decoder.IsValid()) { + std::cout << "Not DAM encoded histogram ignored at offset: " + << static_cast(pointer - buffer) << std::endl; + break; + } + + if (decoder.GetDataSetId() != kDataSetHistogramResult) { + throw std::runtime_error{"Invalid dataset: " + std::to_string(decoder.GetDataSetId())}; + } + + auto size = decoder.Size(); + auto histo = decoder.DecodeFloatArray(); + result.insert(result.end(), histo.cbegin(), histo.cend()); + + remaining -= size; + pointer += size; + } + + *out_hist = result.data(); + *out_len = result.size(); +} + +} // namespace nvflare diff --git a/integration/xgboost/encryption_plugins/src/plugins/pass_thru_plugin.cc b/integration/xgboost/encryption_plugins/src/plugins/pass_thru_plugin.cc new file mode 100644 index 0000000000..4a29d0ed2b --- /dev/null +++ b/integration/xgboost/encryption_plugins/src/plugins/pass_thru_plugin.cc @@ -0,0 +1,130 @@ +/** + * Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include +#include + +#include "pass_thru_plugin.h" +#include "data_set_ids.h" + +namespace nvflare { + +void PassThruPlugin::BuildEncryptedHistHori(const double *in_histogram, std::size_t len, + std::uint8_t **out_hist, std::size_t *out_len) { + if (debug_) { + std::cout << Ident() << " PassThruPlugin::BuildEncryptedHistHori called with " << len << " entries" << std::endl; + } + + DamEncoder encoder(kDataSetHistogramResult, true, dam_debug_); + auto array = std::vector(in_histogram, in_histogram + len); + encoder.AddFloatArray(array); + std::size_t size; + auto buffer = encoder.Finish(size); + buffer_.resize(size); + std::copy_n(buffer, size, buffer_.begin()); + free(buffer); + *out_hist = buffer_.data(); + *out_len = buffer_.size(); +} + +void PassThruPlugin::SyncEncryptedHistHori(const std::uint8_t *buffer, std::size_t len, + double **out_hist, std::size_t *out_len) { + if (debug_) { + std::cout << Ident() << " PassThruPlugin::SyncEncryptedHistHori called with buffer size: " << len << std::endl; + } + + auto remaining = len; + auto pointer = buffer; + + // The buffer is concatenated by AllGather. It may contain multiple DAM buffers + std::vector& result = histo_; + result.clear(); + while (remaining > kPrefixLen) { + DamDecoder decoder(const_cast(pointer), remaining, true, dam_debug_); + if (!decoder.IsValid()) { + std::cout << "Not DAM encoded histogram ignored at offset: " + << static_cast(pointer - buffer) << std::endl; + break; + } + auto size = decoder.Size(); + auto histo = decoder.DecodeFloatArray(); + result.insert(result.end(), histo.cbegin(), histo.cend()); + + remaining -= size; + pointer += size; + } + + *out_hist = result.data(); + *out_len = result.size(); +} + +Buffer PassThruPlugin::EncryptVector(const std::vector& cleartext) { + if (debug_ && cleartext.size() > 2) { + std::cout << "PassThruPlugin::EncryptVector called with cleartext size: " << cleartext.size() << std::endl; + } + + size_t size = cleartext.size() * sizeof(double); + auto buf = static_cast(malloc(size)); + std::copy_n(reinterpret_cast(cleartext.data()), size, buf); + + return {buf, size, true}; +} + +std::vector PassThruPlugin::DecryptVector(const std::vector& ciphertext) { + if (debug_) { + std::cout << "PassThruPlugin::DecryptVector with ciphertext size: " << ciphertext.size() << std::endl; + } + + std::vector result; + + for (auto const &v : ciphertext) { + size_t n = v.buf_size/sizeof(double); + auto p = static_cast(v.buffer); + for (int i = 0; i < n; i++) { + result.push_back(p[i]); + } + } + + return result; +} + +std::map PassThruPlugin::AddGHPairs(const std::map>& sample_ids) { + if (debug_) { + std::cout << "PassThruPlugin::AddGHPairs called with " << sample_ids.size() << " slots" << std::endl; + } + + // Can't do this in real plugin. It needs to be broken into encrypted parts + auto gh_pairs = DecryptVector(std::vector{Buffer(encrypted_gh_.data(), encrypted_gh_.size())}); + + auto result = std::map(); + for (auto const &entry : sample_ids) { + auto rows = entry.second; + double g = 0.0; + double h = 0.0; + + for (auto row : rows) { + g += gh_pairs[2 * row]; + h += gh_pairs[2 * row + 1]; + } + // In real plugin, the sum should be still in encrypted state. 
No need to do this step + auto encrypted_sum = EncryptVector(std::vector{g, h}); + // print_buffer(reinterpret_cast(encrypted_sum.buffer), encrypted_sum.buf_size); + result.insert({entry.first, encrypted_sum}); + } + + return result; +} + +} // namespace nvflare diff --git a/integration/xgboost/encryption_plugins/src/plugins/plugin_main.cc b/integration/xgboost/encryption_plugins/src/plugins/plugin_main.cc new file mode 100644 index 0000000000..4c1d43a6f8 --- /dev/null +++ b/integration/xgboost/encryption_plugins/src/plugins/plugin_main.cc @@ -0,0 +1,184 @@ +/** + * Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include // for shared_ptr +#include // for invalid_argument +#include // for string_view +#include // for vector +#include // for transform + +#include "delegated_plugin.h" + +// Opaque pointer type for the C API. +typedef void *FederatedPluginHandle; // NOLINT + +namespace nvflare { +namespace { +// The opaque type for the C handle. +using CHandleT = std::shared_ptr *; +// Actual representation used in C++ code base. +using HandleT = std::remove_pointer_t; + +std::string &GlobalErrorMsg() { + static thread_local std::string msg; + return msg; +} + +// Perform handle handling for C API functions. 
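+// Usage sketch of this guard pattern (hypothetical call name; assumes the
+// corresponding plugin method exists): every exported C function below
+// forwards through CApiGuard so C++ exceptions become an error code and the
+// message is preserved for FederatedPluginErrorMsg().
+//
+//   int NVF_C FederatedPluginSomeCall(FederatedPluginHandle handle) {
+//     return CApiGuard(handle, [&](HandleT const &plugin) {
+//       plugin->SomeCall();  // forward to the concrete plugin
+//       return 0;            // 0 on success; CApiGuard returns 1 on error
+//     });
+//   }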
+template auto CApiGuard(FederatedPluginHandle handle, Fn &&fn) { + auto pptr = static_cast(handle); + if (!pptr) { + return 1; + } + + try { + if constexpr (std::is_void_v>) { + fn(*pptr); + return 0; + } else { + return fn(*pptr); + } + } catch (std::exception const &e) { + GlobalErrorMsg() = e.what(); + return 1; + } +} +} // namespace +} // namespace nvflare + +#if defined(_MSC_VER) || defined(_WIN32) +#define NVF_C __declspec(dllexport) +#else +#define NVF_C __attribute__((visibility("default"))) +#endif // defined(_MSC_VER) || defined(_WIN32) + +extern "C" { +NVF_C char const *FederatedPluginErrorMsg() { + return nvflare::GlobalErrorMsg().c_str(); +} + +FederatedPluginHandle NVF_C FederatedPluginCreate(int argc, char const **argv) { + // std::cout << "==== FedreatedPluginCreate called with argc=" << argc << std::endl; + using namespace nvflare; + try { + auto pptr = new std::shared_ptr; + std::vector> args; + std::transform( + argv, argv + argc, std::back_inserter(args), [](char const *carg) { + // Split a key value pair in contructor argument: `key=value` + std::string_view arg{carg}; + auto idx = arg.find('='); + if (idx == std::string_view::npos) { + // `=` not found + throw std::invalid_argument{"Invalid argument:" + std::string{arg}}; + } + auto key = arg.substr(0, idx); + auto value = arg.substr(idx + 1); + return std::make_pair(key, value); + }); + *pptr = std::make_shared(args); + // std::cout << "==== Plugin created: " << pptr << std::endl; + return pptr; + } catch (std::exception const &e) { + // std::cout << "==== Create exception " << e.what() << std::endl; + GlobalErrorMsg() = e.what(); + return nullptr; + } +} + +int NVF_C FederatedPluginClose(FederatedPluginHandle handle) { + using namespace nvflare; + auto pptr = static_cast(handle); + if (!pptr) { + return 1; + } + + delete pptr; + + return 0; +} + +int NVF_C FederatedPluginEncryptGPairs(FederatedPluginHandle handle, + float const *in_gpair, size_t n_in, + uint8_t **out_gpair, size_t *n_out) { + using namespace nvflare; + return CApiGuard(handle, [&](HandleT const &plugin) { + plugin->EncryptGPairs(in_gpair, n_in, out_gpair, n_out); + return 0; + }); +} + +int NVF_C FederatedPluginSyncEncryptedGPairs(FederatedPluginHandle handle, + uint8_t const *in_gpair, + size_t n_bytes, + uint8_t const **out_gpair, + size_t *n_out) { + using namespace nvflare; + return CApiGuard(handle, [&](HandleT const &plugin) { + plugin->SyncEncryptedGPairs(in_gpair, n_bytes, out_gpair, n_out); + }); +} + +int NVF_C FederatedPluginResetHistContextVert(FederatedPluginHandle handle, + uint32_t const *cutptrs, + size_t cutptr_len, + int32_t const *bin_idx, + size_t n_idx) { + using namespace nvflare; + return CApiGuard(handle, [&](HandleT const &plugin) { + plugin->ResetHistContext(cutptrs, cutptr_len, bin_idx, n_idx); + }); +} + +int NVF_C FederatedPluginBuildEncryptedHistVert( + FederatedPluginHandle handle, uint64_t const **ridx, size_t const *sizes, + int32_t const *nidx, size_t len, uint8_t **out_hist, size_t *out_len) { + using namespace nvflare; + return CApiGuard(handle, [&](HandleT const &plugin) { + plugin->BuildEncryptedHistVert(ridx, sizes, nidx, len, out_hist, out_len); + }); +} + +int NVF_C FederatedPluginSyncEncryptedHistVert(FederatedPluginHandle handle, + uint8_t *in_hist, size_t len, + double **out_hist, + size_t *out_len) { + using namespace nvflare; + return CApiGuard(handle, [&](HandleT const &plugin) { + plugin->SyncEncryptedHistVert(in_hist, len, out_hist, out_len); + }); +} + +int NVF_C 
FederatedPluginBuildEncryptedHistHori(FederatedPluginHandle handle, + double const *in_hist, + size_t len, uint8_t **out_hist, + size_t *out_len) { + using namespace nvflare; + return CApiGuard(handle, [&](HandleT const &plugin) { + plugin->BuildEncryptedHistHori(in_hist, len, out_hist, out_len); + }); +} + +int NVF_C FederatedPluginSyncEncryptedHistHori(FederatedPluginHandle handle, + uint8_t const *in_hist, + size_t len, double **out_hist, + size_t *out_len) { + using namespace nvflare; + return CApiGuard(handle, [&](HandleT const &plugin) { + plugin->SyncEncryptedHistHori(in_hist, len, out_hist, out_len); + return 0; + }); +} +} // extern "C" diff --git a/integration/xgboost/encryption_plugins/src/plugins/util.cc b/integration/xgboost/encryption_plugins/src/plugins/util.cc new file mode 100644 index 0000000000..a0cbd922d4 --- /dev/null +++ b/integration/xgboost/encryption_plugins/src/plugins/util.cc @@ -0,0 +1,99 @@ +/** + * Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include +#include +#include +#include "util.h" + + +constexpr double kScaleFactor = 1000000.0; + +std::vector> distribute_work(size_t num_jobs, size_t const num_workers) { + std::vector> result; + auto num = num_jobs / num_workers; + auto remainder = num_jobs % num_workers; + int start = 0; + for (int i = 0; i < num_workers; i++) { + auto stop = static_cast((start + num - 1)); + if (i < remainder) { + // If jobs cannot be evenly distributed, first few workers take an extra one + stop += 1; + } + + if (start <= stop) { + result.emplace_back(start, stop); + } + start = stop + 1; + } + + // Verify all jobs are distributed + int sum = 0; + for (auto &item: result) { + sum += item.second - item.first + 1; + } + + if (sum != num_jobs) { + std::cout << "Distribution error" << std::endl; + } + + return result; +} + +uint32_t to_int(double d) { + auto int_val = static_cast(d * kScaleFactor); + return static_cast(int_val); +} + +double to_double(uint32_t i) { + auto int_val = static_cast(i); + return static_cast(int_val / kScaleFactor); +} + +std::string get_string(std::vector> const &args, + std::string_view const &key, std::string_view const default_value) { + + auto it = find_if( + args.begin(), args.end(), + [key](const auto &p) { return p.first == key; }); + + if (it != args.end()) { + return std::string{it->second}; + } + + return std::string{default_value}; +} + +bool get_bool(std::vector> const &args, + const std::string &key, bool default_value) { + std::string value = get_string(args, key, ""); + if (value.empty()) { + return default_value; + } + std::transform(value.begin(), value.end(), value.begin(), [](unsigned char c) { return std::tolower(c); }); + auto true_values = std::set < std::string_view > {"true", "yes", "y", "on", "1"}; + return true_values.count(value) > 0; +} + +int get_int(std::vector> const &args, + const std::string &key, int default_value) { + + auto value = get_string(args, key, ""); + if (value.empty()) { + return 
default_value; + } + + return stoi(value, nullptr); +} diff --git a/integration/xgboost/encryption_plugins/tests/CMakeLists.txt b/integration/xgboost/encryption_plugins/tests/CMakeLists.txt new file mode 100644 index 0000000000..04580bdd59 --- /dev/null +++ b/integration/xgboost/encryption_plugins/tests/CMakeLists.txt @@ -0,0 +1,14 @@ +file(GLOB_RECURSE TEST_SOURCES "*.cc") + +target_sources(xgb_nvflare_test PRIVATE ${TEST_SOURCES}) + +target_include_directories(xgb_nvflare_test + PRIVATE + ${GTEST_INCLUDE_DIRS} + ${xgb_nvflare_SOURCE_DIR/tests} + ${xgb_nvflare_SOURCE_DIR}/src) + +message("Include Dir: ${GTEST_INCLUDE_DIRS}") +target_link_libraries(xgb_nvflare_test + PRIVATE + ${GTEST_LIBRARIES}) diff --git a/integration/xgboost/processor/tests/test_dam.cc b/integration/xgboost/encryption_plugins/tests/test_dam.cc similarity index 65% rename from integration/xgboost/processor/tests/test_dam.cc rename to integration/xgboost/encryption_plugins/tests/test_dam.cc index 5573d5440d..345978b110 100644 --- a/integration/xgboost/processor/tests/test_dam.cc +++ b/integration/xgboost/encryption_plugins/tests/test_dam.cc @@ -19,20 +19,45 @@ TEST(DamTest, TestEncodeDecode) { double float_array[] = {1.1, 1.2, 1.3, 1.4}; int64_t int_array[] = {123, 456, 789}; + char buf1[] = "short"; + char buf2[] = "very long"; + DamEncoder encoder(123); + auto b1 = Buffer(buf1, strlen(buf1)); + auto b2 = Buffer(buf2, strlen(buf2)); + encoder.AddBuffer(b1); + encoder.AddBuffer(b2); + + std::vector b{b1, b2}; + encoder.AddBufferArray(b); + auto f = std::vector(float_array, float_array + 4); encoder.AddFloatArray(f); + auto i = std::vector(int_array, int_array + 3); encoder.AddIntArray(i); + size_t size; auto buf = encoder.Finish(size); std::cout << "Encoded size is " << size << std::endl; - DamDecoder decoder(buf.data(), size); + // Decoding test + DamDecoder decoder(buf, size); EXPECT_EQ(decoder.IsValid(), true); EXPECT_EQ(decoder.GetDataSetId(), 123); + auto new_buf1 = decoder.DecodeBuffer(); + EXPECT_EQ(0, memcmp(new_buf1.buffer, buf1, new_buf1.buf_size)); + + auto new_buf2 = decoder.DecodeBuffer(); + EXPECT_EQ(0, memcmp(new_buf2.buffer, buf2, new_buf2.buf_size)); + + auto buf_vec = decoder.DecodeBufferArray(); + EXPECT_EQ(2, buf_vec.size()); + EXPECT_EQ(0, memcmp(buf_vec[0].buffer, buf1, buf_vec[0].buf_size)); + EXPECT_EQ(0, memcmp(buf_vec[1].buffer, buf2, buf_vec[1].buf_size)); + auto float_vec = decoder.DecodeFloatArray(); EXPECT_EQ(0, memcmp(float_vec.data(), float_array, float_vec.size()*8)); diff --git a/integration/xgboost/processor/tests/test_main.cc b/integration/xgboost/encryption_plugins/tests/test_main.cc similarity index 100% rename from integration/xgboost/processor/tests/test_main.cc rename to integration/xgboost/encryption_plugins/tests/test_main.cc diff --git a/integration/xgboost/processor/tests/test_tenseal.py b/integration/xgboost/encryption_plugins/tests/test_tenseal.py similarity index 100% rename from integration/xgboost/processor/tests/test_tenseal.py rename to integration/xgboost/encryption_plugins/tests/test_tenseal.py diff --git a/integration/xgboost/processor/CMakeLists.txt b/integration/xgboost/processor/CMakeLists.txt deleted file mode 100644 index 056fd365e2..0000000000 --- a/integration/xgboost/processor/CMakeLists.txt +++ /dev/null @@ -1,46 +0,0 @@ -cmake_minimum_required(VERSION 3.19) -project(proc_nvflare LANGUAGES CXX C VERSION 1.0) -set(CMAKE_CXX_STANDARD 17) -set(CMAKE_BUILD_TYPE Debug) - -option(GOOGLE_TEST "Build google tests" OFF) - -file(GLOB_RECURSE LIB_SRC "src/*.cc") - 
-add_library(proc_nvflare SHARED ${LIB_SRC}) -set_target_properties(proc_nvflare PROPERTIES - CXX_STANDARD 17 - CXX_STANDARD_REQUIRED ON - POSITION_INDEPENDENT_CODE ON - ENABLE_EXPORTS ON -) -target_include_directories(proc_nvflare PRIVATE ${proc_nvflare_SOURCE_DIR}/src/include) - -if (APPLE) - add_link_options("LINKER:-object_path_lto,$_lto.o") - add_link_options("LINKER:-cache_path_lto,${CMAKE_BINARY_DIR}/LTOCache") -endif () - -#-- Unit Tests -if(GOOGLE_TEST) - find_package(GTest REQUIRED) - enable_testing() - add_executable(proc_test) - target_link_libraries(proc_test PRIVATE proc_nvflare) - - - target_include_directories(proc_test PRIVATE ${proc_nvflare_SOURCE_DIR}/src/include - ${XGB_SRC}/src - ${XGB_SRC}/rabit/include - ${XGB_SRC}/include - ${XGB_SRC}/dmlc-core/include - ${XGB_SRC}/tests) - - add_subdirectory(${proc_nvflare_SOURCE_DIR}/tests) - - add_test( - NAME TestProcessor - COMMAND proc_test - WORKING_DIRECTORY ${proc_nvflare_BINARY_DIR}) - -endif() diff --git a/integration/xgboost/processor/README.md b/integration/xgboost/processor/README.md deleted file mode 100644 index e879081b84..0000000000 --- a/integration/xgboost/processor/README.md +++ /dev/null @@ -1,11 +0,0 @@ -# Build Instruction - -``` sh -cd NVFlare/integration/xgboost/processor -mkdir build -cd build -cmake .. -make -``` - -See [tests](./tests) for simple examples. \ No newline at end of file diff --git a/integration/xgboost/processor/src/dam/dam.cc b/integration/xgboost/processor/src/dam/dam.cc deleted file mode 100644 index 10625ab9b5..0000000000 --- a/integration/xgboost/processor/src/dam/dam.cc +++ /dev/null @@ -1,146 +0,0 @@ -/** - * Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#include -#include -#include "dam.h" - -void print_buffer(uint8_t *buffer, int size) { - for (int i = 0; i < size; i++) { - auto c = buffer[i]; - std::cout << std::hex << (int) c << " "; - } - std::cout << std::endl << std::dec; -} - -// DamEncoder ====== -void DamEncoder::AddFloatArray(const std::vector &value) { - if (encoded) { - std::cout << "Buffer is already encoded" << std::endl; - return; - } - auto buf_size = value.size() * 8; - uint8_t *buffer = static_cast(malloc(buf_size)); - memcpy(buffer, value.data(), buf_size); - entries->push_back(new Entry(kDataTypeFloatArray, buffer, value.size())); -} - -void DamEncoder::AddIntArray(const std::vector &value) { - std::cout << "AddIntArray called, size: " << value.size() << std::endl; - if (encoded) { - std::cout << "Buffer is already encoded" << std::endl; - return; - } - auto buf_size = value.size()*8; - std::cout << "Allocating " << buf_size << " bytes" << std::endl; - uint8_t *buffer = static_cast(malloc(buf_size)); - memcpy(buffer, value.data(), buf_size); - // print_buffer(buffer, buf_size); - entries->push_back(new Entry(kDataTypeIntArray, buffer, value.size())); -} - -std::vector DamEncoder::Finish(size_t &size) { - encoded = true; - - size = calculate_size(); - std::vector buf(size); - auto pointer = buf.data(); - memcpy(pointer, kSignature, strlen(kSignature)); - memcpy(pointer + 8, &size, 8); - memcpy(pointer + 16, &data_set_id, 8); - - pointer += kPrefixLen; - for (auto entry : *entries) { - memcpy(pointer, &entry->data_type, 8); - pointer += 8; - memcpy(pointer, &entry->size, 8); - pointer += 8; - int len = 8*entry->size; - memcpy(pointer, entry->pointer, len); - free(entry->pointer); - pointer += len; - // print_buffer(entry->pointer, entry->size*8); - } - - if ((pointer - buf.data()) != size) { - throw std::runtime_error{"Invalid encoded size: " + - std::to_string(pointer - buf.data())}; - } - - return buf; -} - -std::size_t DamEncoder::calculate_size() { - auto size = kPrefixLen; - - for (auto entry : *entries) { - size += 16; // The Type and Len - size += entry->size * 8; // All supported data types are 8 bytes - } - - return size; -} - - -// DamDecoder ====== - -DamDecoder::DamDecoder(std::uint8_t const *buffer, std::size_t size) { - this->buffer = buffer; - this->buf_size = size; - this->pos = buffer + kPrefixLen; - if (size >= kPrefixLen) { - memcpy(&len, buffer + 8, 8); - memcpy(&data_set_id, buffer + 16, 8); - } else { - len = 0; - data_set_id = 0; - } -} - -bool DamDecoder::IsValid() { - return buf_size >= kPrefixLen && memcmp(buffer, kSignature, strlen(kSignature)) == 0; -} - -std::vector DamDecoder::DecodeIntArray() { - auto type = *reinterpret_cast(pos); - if (type != kDataTypeIntArray) { - std::cout << "Data type " << type << " doesn't match Int Array" - << std::endl; - return std::vector(); - } - pos += 8; - - auto len = *reinterpret_cast(pos); - pos += 8; - auto ptr = reinterpret_cast(pos); - pos += 8 * len; - return std::vector(ptr, ptr + len); -} - -std::vector DamDecoder::DecodeFloatArray() { - auto type = *reinterpret_cast(pos); - if (type != kDataTypeFloatArray) { - std::cout << "Data type " << type << " doesn't match Float Array" << std::endl; - return std::vector(); - } - pos += 8; - - auto len = *reinterpret_cast(pos); - pos += 8; - - auto ptr = reinterpret_cast(pos); - pos += 8*len; - return std::vector(ptr, ptr + len); -} diff --git a/integration/xgboost/processor/src/include/dam.h b/integration/xgboost/processor/src/include/dam.h deleted file mode 100644 index 7afdf983af..0000000000 --- 
a/integration/xgboost/processor/src/include/dam.h +++ /dev/null @@ -1,93 +0,0 @@ -/** - * Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#pragma once -#include -#include // for int64_t -#include // for size_t - -const char kSignature[] = "NVDADAM1"; // DAM (Direct Accessible Marshalling) V1 -const int kPrefixLen = 24; - -const int kDataTypeInt = 1; -const int kDataTypeFloat = 2; -const int kDataTypeString = 3; -const int kDataTypeIntArray = 257; -const int kDataTypeFloatArray = 258; - -const int kDataTypeMap = 1025; - -class Entry { - public: - int64_t data_type; - uint8_t * pointer; - int64_t size; - - Entry(int64_t data_type, uint8_t *pointer, int64_t size) { - this->data_type = data_type; - this->pointer = pointer; - this->size = size; - } -}; - -class DamEncoder { - private: - bool encoded = false; - int64_t data_set_id; - std::vector *entries = new std::vector(); - - public: - explicit DamEncoder(int64_t data_set_id) { - this->data_set_id = data_set_id; - } - - void AddIntArray(const std::vector &value); - - void AddFloatArray(const std::vector &value); - - std::vector Finish(size_t &size); - - private: - std::size_t calculate_size(); -}; - -class DamDecoder { - private: - std::uint8_t const *buffer = nullptr; - std::size_t buf_size = 0; - std::uint8_t const *pos = nullptr; - std::size_t remaining = 0; - int64_t data_set_id = 0; - int64_t len = 0; - - public: - explicit DamDecoder(std::uint8_t const *buffer, std::size_t size); - - size_t Size() { - return len; - } - - int64_t GetDataSetId() { - return data_set_id; - } - - bool IsValid(); - - std::vector DecodeIntArray(); - - std::vector DecodeFloatArray(); -}; - -void print_buffer(uint8_t *buffer, int size); diff --git a/integration/xgboost/processor/src/nvflare-plugin/nvflare_processor.cc b/integration/xgboost/processor/src/nvflare-plugin/nvflare_processor.cc deleted file mode 100644 index 3e742b14ef..0000000000 --- a/integration/xgboost/processor/src/nvflare-plugin/nvflare_processor.cc +++ /dev/null @@ -1,378 +0,0 @@ -/** - * Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#include "nvflare_processor.h" - -#include "dam.h" // for DamEncoder -#include -#include // for copy_n, transform -#include // for memcpy -#include // for shared_ptr -#include // for invalid_argument -#include // for string_view -#include // for vector - -namespace nvflare { -namespace { -// The opaque type for the C handle. -using CHandleT = std::shared_ptr *; -// Actual representation used in C++ code base. -using HandleT = std::remove_pointer_t; - -std::string &GlobalErrorMsg() { - static thread_local std::string msg; - return msg; -} - -// Perform handle handling for C API functions. -template auto CApiGuard(FederatedPluginHandle handle, Fn &&fn) { - auto pptr = static_cast(handle); - if (!pptr) { - return 1; - } - - try { - if constexpr (std::is_void_v>) { - fn(*pptr); - return 0; - } else { - return fn(*pptr); - } - } catch (std::exception const &e) { - GlobalErrorMsg() = e.what(); - return 1; - } -} -} // namespace - -TensealPlugin::TensealPlugin( - std::vector> const &args) { - if (!args.empty()) { - throw std::invalid_argument{"Invaid arguments for the tenseal plugin."}; - } -} - -void TensealPlugin::EncryptGPairs(float const *in_gpair, std::size_t n_in, - std::uint8_t **out_gpair, - std::size_t *n_out) { - std::vector pairs(n_in); - std::copy_n(in_gpair, n_in, pairs.begin()); - DamEncoder encoder(kDataSetHGPairs); - encoder.AddFloatArray(pairs); - encrypted_gpairs_ = encoder.Finish(*n_out); - if (!out_gpair) { - throw std::invalid_argument{"Invalid pointer to output gpair."}; - } - *out_gpair = encrypted_gpairs_.data(); - *n_out = encrypted_gpairs_.size(); -} - -void TensealPlugin::SyncEncryptedGPairs(std::uint8_t const *in_gpair, - std::size_t n_bytes, - std::uint8_t const **out_gpair, - std::size_t *out_n_bytes) { - *out_n_bytes = n_bytes; - *out_gpair = in_gpair; -} - -void TensealPlugin::ResetHistContext(std::uint32_t const *cutptrs, - std::size_t cutptr_len, - std::int32_t const *bin_idx, - std::size_t n_idx) { - // fixme: this doesn't have to be copied multiple times. - this->cut_ptrs_.resize(cutptr_len); - std::copy_n(cutptrs, cutptr_len, cut_ptrs_.begin()); - this->bin_idx_.resize(n_idx); - std::copy_n(bin_idx, n_idx, this->bin_idx_.begin()); -} - -void TensealPlugin::BuildEncryptedHistVert(std::size_t const **ridx, - std::size_t const *sizes, - std::int32_t const *nidx, - std::size_t len, - std::uint8_t** out_hist, - std::size_t* out_len) { - std::int64_t data_set_id; - if (!feature_sent_) { - data_set_id = kDataSetAggregationWithFeatures; - feature_sent_ = true; - } else { - data_set_id = kDataSetAggregation; - } - - DamEncoder encoder(data_set_id); - - // Add cuts pointers - std::vector cuts_vec(cut_ptrs_.cbegin(), cut_ptrs_.cend()); - encoder.AddIntArray(cuts_vec); - - auto num_features = cut_ptrs_.size() - 1; - auto num_samples = bin_idx_.size() / num_features; - - if (data_set_id == kDataSetAggregationWithFeatures) { - if (features_.empty()) { // when is it not empty? - for (std::size_t f = 0; f < num_features; f++) { - auto slot = bin_idx_[f]; - if (slot >= 0) { - // what happens if it's missing? 
- features_.push_back(f); - } - } - } - encoder.AddIntArray(features_); - - std::vector bins; - for (int i = 0; i < num_samples; i++) { - for (auto f : features_) { - auto index = f + i * num_features; - if (index > bin_idx_.size()) { - throw std::out_of_range{"Index is out of range: " + - std::to_string(index)}; - } - auto slot = bin_idx_[index]; - bins.push_back(slot); - } - } - encoder.AddIntArray(bins); - } - - // Add nodes to build - std::vector node_vec(len); - std::copy_n(nidx, len, node_vec.begin()); - encoder.AddIntArray(node_vec); - - // For each node, get the row_id/slot pair - for (std::size_t i = 0; i < len; ++i) { - std::vector rows(sizes[i]); - std::copy_n(ridx[i], sizes[i], rows.begin()); - encoder.AddIntArray(rows); - } - - std::size_t n{0}; - encrypted_hist_ = encoder.Finish(n); - - *out_hist = encrypted_hist_.data(); - *out_len = encrypted_hist_.size(); -} - -void TensealPlugin::SyncEncryptedHistVert(std::uint8_t *buffer, - std::size_t buf_size, double **out, - std::size_t *out_len) { - auto remaining = buf_size; - char *pointer = reinterpret_cast(buffer); - - // The buffer is concatenated by AllGather. It may contain multiple DAM - // buffers - std::vector &result = hist_; - result.clear(); - auto max_slot = cut_ptrs_.back(); - auto array_size = 2 * max_slot * sizeof(double); - // A new histogram array? - double *slots = static_cast(malloc(array_size)); - while (remaining > kPrefixLen) { - DamDecoder decoder(reinterpret_cast(pointer), remaining); - if (!decoder.IsValid()) { - std::cout << "Not DAM encoded buffer ignored at offset: " - << static_cast( - (pointer - reinterpret_cast(buffer))) - << std::endl; - break; - } - auto size = decoder.Size(); - auto node_list = decoder.DecodeIntArray(); - for (auto node : node_list) { - std::memset(slots, 0, array_size); - auto feature_list = decoder.DecodeIntArray(); - // Convert per-feature histo to a flat one - for (auto f : feature_list) { - auto base = cut_ptrs_[f]; // cut pointer for the current feature - auto bins = decoder.DecodeFloatArray(); - auto n = bins.size() / 2; - for (int i = 0; i < n; i++) { - auto index = base + i; - // [Q] Build local histogram? Why does it need to be built here? 
- slots[2 * index] += bins[2 * i]; - slots[2 * index + 1] += bins[2 * i + 1]; - } - } - result.insert(result.end(), slots, slots + 2 * max_slot); - } - remaining -= size; - pointer += size; - } - free(slots); - - *out_len = result.size(); - *out = result.data(); -} - -void TensealPlugin::BuildEncryptedHistHori(double const *in_histogram, - std::size_t len, - std::uint8_t **out_hist, - std::size_t *out_len) { - DamEncoder encoder(kDataSetHistograms); - std::vector copy(in_histogram, in_histogram + len); - encoder.AddFloatArray(copy); - - std::size_t size{0}; - this->encrypted_hist_ = encoder.Finish(size); - - *out_hist = this->encrypted_hist_.data(); - *out_len = this->encrypted_hist_.size(); -} - -void TensealPlugin::SyncEncryptedHistHori(std::uint8_t const *buffer, - std::size_t len, double **out_hist, - std::size_t *out_len) { - DamDecoder decoder(reinterpret_cast(buffer), len); - if (!decoder.IsValid()) { - std::cout << "Not DAM encoded buffer, ignored" << std::endl; - } - - if (decoder.GetDataSetId() != kDataSetHistogramResult) { - throw std::runtime_error{"Invalid dataset: " + - std::to_string(decoder.GetDataSetId())}; - } - this->hist_ = decoder.DecodeFloatArray(); - *out_hist = this->hist_.data(); - *out_len = this->hist_.size(); -} -} // namespace nvflare - -#if defined(_MSC_VER) || defined(_WIN32) -#define NVF_C __declspec(dllexport) -#else -#define NVF_C __attribute__((visibility("default"))) -#endif // defined(_MSC_VER) || defined(_WIN32) - -extern "C" { -NVF_C char const *FederatedPluginErrorMsg() { - return nvflare::GlobalErrorMsg().c_str(); -} - -FederatedPluginHandle NVF_C FederatedPluginCreate(int argc, char const **argv) { - using namespace nvflare; - try { - CHandleT pptr = new std::shared_ptr; - std::vector> args; - std::transform( - argv, argv + argc, std::back_inserter(args), [](char const *carg) { - // Split a key value pair in contructor argument: `key=value` - std::string_view arg{carg}; - auto idx = arg.find('='); - if (idx == std::string_view::npos) { - // `=` not found - throw std::invalid_argument{"Invalid argument:" + std::string{arg}}; - } - auto key = arg.substr(0, idx); - auto value = arg.substr(idx + 1); - return std::make_pair(key, value); - }); - *pptr = std::make_shared(args); - return pptr; - } catch (std::exception const &e) { - GlobalErrorMsg() = e.what(); - return nullptr; - } -} - -int NVF_C FederatedPluginClose(FederatedPluginHandle handle) { - using namespace nvflare; - auto pptr = static_cast(handle); - if (!pptr) { - return 1; - } - - try { - delete pptr; - } catch (std::exception const &e) { - GlobalErrorMsg() = e.what(); - return 1; - } - return 0; -} - -int NVF_C FederatedPluginEncryptGPairs(FederatedPluginHandle handle, - float const *in_gpair, size_t n_in, - uint8_t **out_gpair, size_t *n_out) { - using namespace nvflare; - return CApiGuard(handle, [&](HandleT plugin) { - plugin->EncryptGPairs(in_gpair, n_in, out_gpair, n_out); - return 0; - }); -} - -int NVF_C FederatedPluginSyncEncryptedGPairs(FederatedPluginHandle handle, - uint8_t const *in_gpair, - size_t n_bytes, - uint8_t const **out_gpair, - size_t *n_out) { - using namespace nvflare; - return CApiGuard(handle, [&](HandleT plugin) { - plugin->SyncEncryptedGPairs(in_gpair, n_bytes, out_gpair, n_out); - }); -} - -int NVF_C FederatedPluginResetHistContextVert(FederatedPluginHandle handle, - uint32_t const *cutptrs, - size_t cutptr_len, - int32_t const *bin_idx, - size_t n_idx) { - using namespace nvflare; - return CApiGuard(handle, [&](HandleT plugin) { - 
plugin->ResetHistContext(cutptrs, cutptr_len, bin_idx, n_idx); - }); -} - -int NVF_C FederatedPluginBuildEncryptedHistVert( - FederatedPluginHandle handle, uint64_t const **ridx, size_t const *sizes, - int32_t const *nidx, size_t len, uint8_t **out_hist, size_t *out_len) { - using namespace nvflare; - return CApiGuard(handle, [&](HandleT plugin) { - plugin->BuildEncryptedHistVert(ridx, sizes, nidx, len, out_hist, out_len); - }); -} - -int NVF_C FederatedPluginSyncEnrcyptedHistVert(FederatedPluginHandle handle, - uint8_t *in_hist, size_t len, - double **out_hist, - size_t *out_len) { - using namespace nvflare; - return CApiGuard(handle, [&](HandleT plugin) { - plugin->SyncEncryptedHistVert(in_hist, len, out_hist, out_len); - }); -} - -int NVF_C FederatedPluginBuildEncryptedHistHori(FederatedPluginHandle handle, - double const *in_hist, - size_t len, uint8_t **out_hist, - size_t *out_len) { - using namespace nvflare; - return CApiGuard(handle, [&](HandleT plugin) { - plugin->BuildEncryptedHistHori(in_hist, len, out_hist, out_len); - }); -} - -int NVF_C FederatedPluginSyncEnrcyptedHistHori(FederatedPluginHandle handle, - uint8_t const *in_hist, - size_t len, double **out_hist, - size_t *out_len) { - using namespace nvflare; - return CApiGuard(handle, [&](HandleT plugin) { - plugin->SyncEncryptedHistHori(in_hist, len, out_hist, out_len); - return 0; - }); -} -} // extern "C" diff --git a/integration/xgboost/processor/tests/CMakeLists.txt b/integration/xgboost/processor/tests/CMakeLists.txt deleted file mode 100644 index 893d8738dc..0000000000 --- a/integration/xgboost/processor/tests/CMakeLists.txt +++ /dev/null @@ -1,14 +0,0 @@ -file(GLOB_RECURSE TEST_SOURCES "*.cc") - -target_sources(proc_test PRIVATE ${TEST_SOURCES}) - -target_include_directories(proc_test - PRIVATE - ${GTEST_INCLUDE_DIRS} - ${proc_nvflare_SOURCE_DIR/tests} - ${proc_nvflare_SOURCE_DIR}/src) - -message("Include Dir: ${GTEST_INCLUDE_DIRS}") -target_link_libraries(proc_test - PRIVATE - ${GTEST_LIBRARIES}) diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/defs.py b/nvflare/app_opt/xgboost/histogram_based_v2/defs.py index b559306440..b32535ac15 100644 --- a/nvflare/app_opt/xgboost/histogram_based_v2/defs.py +++ b/nvflare/app_opt/xgboost/histogram_based_v2/defs.py @@ -109,6 +109,8 @@ class Constant: HEADER_KEY_HORIZONTAL = "xgb.horizontal" HEADER_KEY_ORIGINAL_BUF_SIZE = "xgb.original_buf_size" HEADER_KEY_IN_AGGR = "xgb.in_aggr" + HEADER_KEY_WORLD_SIZE = "xgb.world_size" + HEADER_KEY_SIZE_DICT = "xgb.size_dict" DUMMY_BUFFER_SIZE = 4 @@ -122,8 +124,6 @@ class Constant: class SplitMode: ROW = 0 COL = 1 - COL_SECURE = 2 - ROW_SECURE = 3 # Mapping of text training mode to split mode @@ -132,10 +132,10 @@ class SplitMode: "horizontal": SplitMode.ROW, "v": SplitMode.COL, "vertical": SplitMode.COL, - "hs": SplitMode.ROW_SECURE, - "horizontal_secure": SplitMode.ROW_SECURE, - "vs": SplitMode.COL_SECURE, - "vertical_secure": SplitMode.COL_SECURE, + "hs": SplitMode.ROW, + "horizontal_secure": SplitMode.ROW, + "vs": SplitMode.COL, + "vertical_secure": SplitMode.COL, } SECURE_TRAINING_MODES = {"hs", "horizontal_secure", "vs", "vertical_secure"} diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/proto/federated_pb2.pyi b/nvflare/app_opt/xgboost/histogram_based_v2/proto/federated_pb2.pyi index 7ad47596df..7dc3e6dde1 100644 --- a/nvflare/app_opt/xgboost/histogram_based_v2/proto/federated_pb2.pyi +++ b/nvflare/app_opt/xgboost/histogram_based_v2/proto/federated_pb2.pyi @@ -6,7 +6,7 @@ from typing import ClassVar as _ClassVar, Optional as 
_Optional, Union as _Union DESCRIPTOR: _descriptor.FileDescriptor class DataType(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): - __slots__ = [] + __slots__ = () HALF: _ClassVar[DataType] FLOAT: _ClassVar[DataType] DOUBLE: _ClassVar[DataType] @@ -21,7 +21,7 @@ class DataType(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): UINT64: _ClassVar[DataType] class ReduceOperation(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): - __slots__ = [] + __slots__ = () MAX: _ClassVar[ReduceOperation] MIN: _ClassVar[ReduceOperation] SUM: _ClassVar[ReduceOperation] @@ -48,7 +48,7 @@ BITWISE_OR: ReduceOperation BITWISE_XOR: ReduceOperation class AllgatherRequest(_message.Message): - __slots__ = ["sequence_number", "rank", "send_buffer"] + __slots__ = ("sequence_number", "rank", "send_buffer") SEQUENCE_NUMBER_FIELD_NUMBER: _ClassVar[int] RANK_FIELD_NUMBER: _ClassVar[int] SEND_BUFFER_FIELD_NUMBER: _ClassVar[int] @@ -58,13 +58,13 @@ class AllgatherRequest(_message.Message): def __init__(self, sequence_number: _Optional[int] = ..., rank: _Optional[int] = ..., send_buffer: _Optional[bytes] = ...) -> None: ... class AllgatherReply(_message.Message): - __slots__ = ["receive_buffer"] + __slots__ = ("receive_buffer",) RECEIVE_BUFFER_FIELD_NUMBER: _ClassVar[int] receive_buffer: bytes def __init__(self, receive_buffer: _Optional[bytes] = ...) -> None: ... class AllgatherVRequest(_message.Message): - __slots__ = ["sequence_number", "rank", "send_buffer"] + __slots__ = ("sequence_number", "rank", "send_buffer") SEQUENCE_NUMBER_FIELD_NUMBER: _ClassVar[int] RANK_FIELD_NUMBER: _ClassVar[int] SEND_BUFFER_FIELD_NUMBER: _ClassVar[int] @@ -74,13 +74,13 @@ class AllgatherVRequest(_message.Message): def __init__(self, sequence_number: _Optional[int] = ..., rank: _Optional[int] = ..., send_buffer: _Optional[bytes] = ...) -> None: ... class AllgatherVReply(_message.Message): - __slots__ = ["receive_buffer"] + __slots__ = ("receive_buffer",) RECEIVE_BUFFER_FIELD_NUMBER: _ClassVar[int] receive_buffer: bytes def __init__(self, receive_buffer: _Optional[bytes] = ...) -> None: ... class AllreduceRequest(_message.Message): - __slots__ = ["sequence_number", "rank", "send_buffer", "data_type", "reduce_operation"] + __slots__ = ("sequence_number", "rank", "send_buffer", "data_type", "reduce_operation") SEQUENCE_NUMBER_FIELD_NUMBER: _ClassVar[int] RANK_FIELD_NUMBER: _ClassVar[int] SEND_BUFFER_FIELD_NUMBER: _ClassVar[int] @@ -94,13 +94,13 @@ class AllreduceRequest(_message.Message): def __init__(self, sequence_number: _Optional[int] = ..., rank: _Optional[int] = ..., send_buffer: _Optional[bytes] = ..., data_type: _Optional[_Union[DataType, str]] = ..., reduce_operation: _Optional[_Union[ReduceOperation, str]] = ...) -> None: ... class AllreduceReply(_message.Message): - __slots__ = ["receive_buffer"] + __slots__ = ("receive_buffer",) RECEIVE_BUFFER_FIELD_NUMBER: _ClassVar[int] receive_buffer: bytes def __init__(self, receive_buffer: _Optional[bytes] = ...) -> None: ... class BroadcastRequest(_message.Message): - __slots__ = ["sequence_number", "rank", "send_buffer", "root"] + __slots__ = ("sequence_number", "rank", "send_buffer", "root") SEQUENCE_NUMBER_FIELD_NUMBER: _ClassVar[int] RANK_FIELD_NUMBER: _ClassVar[int] SEND_BUFFER_FIELD_NUMBER: _ClassVar[int] @@ -112,7 +112,7 @@ class BroadcastRequest(_message.Message): def __init__(self, sequence_number: _Optional[int] = ..., rank: _Optional[int] = ..., send_buffer: _Optional[bytes] = ..., root: _Optional[int] = ...) -> None: ... 
class BroadcastReply(_message.Message): - __slots__ = ["receive_buffer"] + __slots__ = ("receive_buffer",) RECEIVE_BUFFER_FIELD_NUMBER: _ClassVar[int] receive_buffer: bytes def __init__(self, receive_buffer: _Optional[bytes] = ...) -> None: ... diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/proto/federated_pb2_grpc.py b/nvflare/app_opt/xgboost/histogram_based_v2/proto/federated_pb2_grpc.py index 45eee5c8dd..549d0e4ffc 100644 --- a/nvflare/app_opt/xgboost/histogram_based_v2/proto/federated_pb2_grpc.py +++ b/nvflare/app_opt/xgboost/histogram_based_v2/proto/federated_pb2_grpc.py @@ -12,13 +12,16 @@ # See the License for the specific language governing permissions and # limitations under the License. +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: federated.proto +# Protobuf Python Version: 4.25.1 # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! """Client and server classes corresponding to protobuf-defined services.""" import grpc import nvflare.app_opt.xgboost.histogram_based_v2.proto.federated_pb2 as federated__pb2 - class FederatedStub(object): """Missing associated documentation comment in .proto file.""" diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/runners/xgb_client_runner.py b/nvflare/app_opt/xgboost/histogram_based_v2/runners/xgb_client_runner.py index 1b98829711..0d8e8bec1d 100644 --- a/nvflare/app_opt/xgboost/histogram_based_v2/runners/xgb_client_runner.py +++ b/nvflare/app_opt/xgboost/histogram_based_v2/runners/xgb_client_runner.py @@ -30,7 +30,9 @@ from nvflare.fuel.utils.obj_utils import get_logger from nvflare.utils.cli_utils import get_package_root -LOADER_PARAMS_LIBRARY_PATH = "LIBRARY_PATH" +PLUGIN_PARAM_KEY = "federated_plugin" +PLUGIN_KEY_NAME = "name" +PLUGIN_KEY_PATH = "path" class XGBClientRunner(AppRunner, FLComponent): @@ -135,7 +137,7 @@ def run(self, ctx: dict): self.logger.info(f"server address is {self._server_addr}") communicator_env = { - "xgboost_communicator": "federated", + "dmlc_communicator": "federated", "federated_server_address": f"{self._server_addr}", "federated_world_size": self._world_size, "federated_rank": self._rank, @@ -145,38 +147,35 @@ def run(self, ctx: dict): self.logger.info("XGBoost non-secure training") else: xgb_plugin_name = ConfigService.get_str_var( - name="xgb_plugin_name", conf=SystemConfigs.RESOURCES_CONF, default="nvflare" + name="xgb_plugin_name", conf=SystemConfigs.RESOURCES_CONF, default=None ) - - xgb_loader_params = ConfigService.get_dict_var( - name="xgb_loader_params", conf=SystemConfigs.RESOURCES_CONF, default={} + xgb_plugin_path = ConfigService.get_str_var( + name="xgb_plugin_path", conf=SystemConfigs.RESOURCES_CONF, default=None + ) + xgb_plugin_params: dict = ConfigService.get_dict_var( + name=PLUGIN_PARAM_KEY, conf=SystemConfigs.RESOURCES_CONF, default={} ) - # Library path is frequently used, add a scalar config var and overwrite what's in the dict - xgb_library_path = ConfigService.get_str_var(name="xgb_library_path", conf=SystemConfigs.RESOURCES_CONF) - if xgb_library_path: - xgb_loader_params[LOADER_PARAMS_LIBRARY_PATH] = xgb_library_path + # path and name can be overwritten by scalar configuration + if xgb_plugin_name: + xgb_plugin_params[PLUGIN_KEY_NAME] = xgb_plugin_name - lib_path = xgb_loader_params.get(LOADER_PARAMS_LIBRARY_PATH, None) - if not lib_path: - xgb_loader_params[LOADER_PARAMS_LIBRARY_PATH] = str(get_package_root() / "libs") + if xgb_plugin_path: + xgb_plugin_params[PLUGIN_KEY_PATH] = xgb_plugin_path - 
xgb_proc_params = ConfigService.get_dict_var( - name="xgb_proc_params", conf=SystemConfigs.RESOURCES_CONF, default={} - ) + # Set default plugin name + if not xgb_plugin_params.get(PLUGIN_KEY_NAME): + xgb_plugin_params[PLUGIN_KEY_NAME] = "cuda_paillier" - self.logger.info( - f"XGBoost secure mode: {self._training_mode} plugin_name: {xgb_plugin_name} " - f"proc_params: {xgb_proc_params} loader_params: {xgb_loader_params}" - ) + if not xgb_plugin_params.get(PLUGIN_KEY_PATH): + # This only works on Linux. Need to support other platforms + lib_ext = "so" + lib_name = f"lib{xgb_plugin_params[PLUGIN_KEY_NAME]}.{lib_ext}" + xgb_plugin_params[PLUGIN_KEY_PATH] = str(get_package_root() / "libs" / lib_name) - communicator_env.update( - { - "plugin_name": xgb_plugin_name, - "proc_params": xgb_proc_params, - "loader_params": xgb_loader_params, - } - ) + self.logger.info(f"XGBoost secure training: {self._training_mode} Params: {xgb_plugin_params}") + + communicator_env[PLUGIN_PARAM_KEY] = xgb_plugin_params with xgb.collective.CommunicatorContext(**communicator_env): # Load the data. Dmatrix must be created with column split mode in CommunicatorContext for vertical FL diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/runners/xgb_server_runner.py b/nvflare/app_opt/xgboost/histogram_based_v2/runners/xgb_server_runner.py index 32e708c90e..e4a8796a38 100644 --- a/nvflare/app_opt/xgboost/histogram_based_v2/runners/xgb_server_runner.py +++ b/nvflare/app_opt/xgboost/histogram_based_v2/runners/xgb_server_runner.py @@ -29,8 +29,8 @@ def run(self, ctx: dict): self._world_size = ctx.get(Constant.RUNNER_CTX_WORLD_SIZE) xgb_federated.run_federated_server( + n_workers=self._world_size, port=self._port, - world_size=self._world_size, ) self._stopped = True diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/sec/client_handler.py b/nvflare/app_opt/xgboost/histogram_based_v2/sec/client_handler.py index 5aad654824..ea5607d828 100644 --- a/nvflare/app_opt/xgboost/histogram_based_v2/sec/client_handler.py +++ b/nvflare/app_opt/xgboost/histogram_based_v2/sec/client_handler.py @@ -299,6 +299,10 @@ def _process_after_all_gather_v(self, fl_ctx: FLContext): self._process_after_all_gather_v_vertical(fl_ctx) def _process_after_all_gather_v_vertical(self, fl_ctx: FLContext): + reply = fl_ctx.get_prop(Constant.PARAM_KEY_REPLY) + size_dict = reply.get_header(Constant.HEADER_KEY_SIZE_DICT) + total_size = sum(size_dict.values()) + self.info(fl_ctx, f"{total_size=} {size_dict=}") rcv_buf = fl_ctx.get_prop(Constant.PARAM_KEY_RCV_BUF) # this rcv_buf is a list of replies from ALL clients! rank = fl_ctx.get_prop(Constant.PARAM_KEY_RANK) @@ -309,7 +313,7 @@ def _process_after_all_gather_v_vertical(self, fl_ctx: FLContext): if not self.clear_ghs: # this is non-label client - don't care about the results - dummy = os.urandom(Constant.DUMMY_BUFFER_SIZE) + dummy = os.urandom(total_size) fl_ctx.set_prop(key=Constant.PARAM_KEY_RCV_BUF, value=dummy, private=True, sticky=False) self.info(fl_ctx, "non-label client: return dummy buffer back to XGB") return @@ -352,16 +356,45 @@ def _process_after_all_gather_v_vertical(self, fl_ctx: FLContext): self.info(fl_ctx, f"final aggr: {gid=} features={fid_list}") result = self.data_converter.encode_aggregation_result(final_result, fl_ctx) + + # XGBoost expects every worker to have a set of histograms.
They are already combined here, so + # just add zeros + zero_result = final_result + for result_list in zero_result.values(): + for item in result_list: + size = len(item.aggregated_hist) + item.aggregated_hist = [(0, 0)] * size + zero_buf = self.data_converter.encode_aggregation_result(zero_result, fl_ctx) + world_size = len(size_dict) + for _ in range(world_size - 1): + result += zero_buf + + # XGBoost checks that the size of allgatherv is not changed + padding_size = total_size - len(result) + if padding_size > 0: + result += b"\x00" * padding_size + elif padding_size < 0: + self.error(fl_ctx, f"The original size {total_size} is not big enough for data size {len(result)}") + fl_ctx.set_prop(key=Constant.PARAM_KEY_RCV_BUF, value=result, private=True, sticky=False) def _process_after_all_gather_v_horizontal(self, fl_ctx: FLContext): + reply = fl_ctx.get_prop(Constant.PARAM_KEY_REPLY) + world_size = reply.get_header(Constant.HEADER_KEY_WORLD_SIZE) encrypted_histograms = fl_ctx.get_prop(Constant.PARAM_KEY_RCV_BUF) rank = fl_ctx.get_prop(Constant.PARAM_KEY_RANK) if not isinstance(encrypted_histograms, CKKSVector): return self._abort(f"rank {rank}: expect a CKKSVector but got {type(encrypted_histograms)}", fl_ctx) histograms = encrypted_histograms.decrypt(secret_key=self.tenseal_context.secret_key()) + result = self.data_converter.encode_histograms_result(histograms, fl_ctx) + + # XGBoost expects every worker to return a histogram; all zeros are returned for the other workers + zeros = [0.0] * len(histograms) + zero_buf = self.data_converter.encode_histograms_result(zeros, fl_ctx) + for _ in range(world_size - 1): + result += zero_buf fl_ctx.set_prop(key=Constant.PARAM_KEY_RCV_BUF, value=result, private=True, sticky=False) def handle_event(self, event_type: str, fl_ctx: FLContext): @@ -376,7 +409,7 @@ def handle_event(self, event_type: str, fl_ctx: FLContext): else: self.debug(fl_ctx, "Tenseal module not loaded, horizontal secure XGBoost is not supported") except Exception as ex: - self.debug(fl_ctx, f"Can't load tenseal context, horizontal secure XGBoost is not supported: {ex}") + self.error(fl_ctx, f"Can't load tenseal context, horizontal secure XGBoost is not supported: {ex}") self.tenseal_context = None elif event_type == EventType.END_RUN: self.tenseal_context = None diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/sec/server_handler.py b/nvflare/app_opt/xgboost/histogram_based_v2/sec/server_handler.py index 53e936c7d4..47e44d17d6 100644 --- a/nvflare/app_opt/xgboost/histogram_based_v2/sec/server_handler.py +++ b/nvflare/app_opt/xgboost/histogram_based_v2/sec/server_handler.py @@ -39,6 +39,8 @@ def __init__(self): self.aggr_result_dict = None self.aggr_result_to_send = None self.aggr_result_lock = threading.Lock() + self.world_size = 0 + self.size_dict = None if tenseal_imported: decomposers.register() @@ -124,6 +126,10 @@ def _process_before_all_gather_v(self, fl_ctx: FLContext): else: self.info(fl_ctx, f"no aggr data from {rank=}") + if self.size_dict is None: + self.size_dict = {} + + self.size_dict[rank] = request.get_header(Constant.HEADER_KEY_ORIGINAL_BUF_SIZE) # only send a dummy to the Server fl_ctx.set_prop( key=Constant.PARAM_KEY_SEND_BUF, value=os.urandom(Constant.DUMMY_BUFFER_SIZE), private=True, sticky=False @@ -146,6 +152,7 @@ def _process_after_all_gather_v(self, fl_ctx: FLContext): horizontal = fl_ctx.get_prop(Constant.HEADER_KEY_HORIZONTAL) reply.set_header(Constant.HEADER_KEY_ENCRYPTED_DATA, True) reply.set_header(Constant.HEADER_KEY_HORIZONTAL, horizontal) + with
self.aggr_result_lock: if not self.aggr_result_to_send: if not self.aggr_result_dict: @@ -159,6 +166,10 @@ def _process_after_all_gather_v(self, fl_ctx: FLContext): # reset aggr_result_dict for next gather self.aggr_result_dict = None + self.world_size = len(self.size_dict) + reply.set_header(Constant.HEADER_KEY_WORLD_SIZE, self.world_size) + reply.set_header(Constant.HEADER_KEY_SIZE_DICT, self.size_dict) + if horizontal: length = self.aggr_result_to_send.size() else: diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/secure_data_loader.py b/nvflare/app_opt/xgboost/histogram_based_v2/secure_data_loader.py new file mode 100644 index 0000000000..6540eb519c --- /dev/null +++ b/nvflare/app_opt/xgboost/histogram_based_v2/secure_data_loader.py @@ -0,0 +1,50 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import xgboost as xgb + +from nvflare.app_opt.xgboost.data_loader import XGBDataLoader +from nvflare.app_opt.xgboost.histogram_based_v2.defs import TRAINING_MODE_MAPPING, SplitMode + + +class SecureDataLoader(XGBDataLoader): + def __init__(self, rank: int, folder: str): + """Reads the CSV dataset and returns XGB data matrices in secure mode. + + Args: + rank: Rank of the site + folder: Folder to find the CSV files + """ + self.rank = rank + self.folder = folder + + def load_data(self, client_id: str, training_mode: str): + + train_path = f"{self.folder}/site-{self.rank + 1}/train.csv" + valid_path = f"{self.folder}/site-{self.rank + 1}/valid.csv" + + if training_mode not in TRAINING_MODE_MAPPING: + raise ValueError(f"Invalid training_mode: {training_mode}") + + data_split_mode = TRAINING_MODE_MAPPING[training_mode] + + if self.rank == 0 or data_split_mode == SplitMode.ROW: + label = "&label_column=0" + else: + label = "" + + train_data = xgb.DMatrix(train_path + f"?format=csv{label}", data_split_mode=data_split_mode) + valid_data = xgb.DMatrix(valid_path + f"?format=csv{label}", data_split_mode=data_split_mode) + + return train_data, valid_data From 8366a9b672a41eb95e79ed0d45607a2a7ca90afa Mon Sep 17 00:00:00 2001 From: Yuhong Wen Date: Sat, 3 Aug 2024 23:52:43 -0400 Subject: [PATCH 16/16] Moved the simulator server logger init earlier.
(#2753) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Yuan-Ting Hsieh (謝沅廷) --- nvflare/private/fed/app/simulator/simulator_runner.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/nvflare/private/fed/app/simulator/simulator_runner.py b/nvflare/private/fed/app/simulator/simulator_runner.py index 274ad2a889..3952ea5db5 100644 --- a/nvflare/private/fed/app/simulator/simulator_runner.py +++ b/nvflare/private/fed/app/simulator/simulator_runner.py @@ -174,6 +174,10 @@ def setup(self): self._cleanup_workspace() init_security_content_service(self.args.workspace) + os.makedirs(os.path.join(self.simulator_root, "server")) + log_file = os.path.join(self.simulator_root, "server", WorkspaceConstants.LOG_FILE_NAME) + add_logfile_handler(log_file) + try: data_bytes, job_name, meta = self.validate_job_data() @@ -501,9 +505,6 @@ def start_server_app(self, args): args.workspace = os.path.join(self.simulator_root, "server") os.chdir(args.workspace) - log_file = os.path.join(self.simulator_root, "server", WorkspaceConstants.LOG_FILE_NAME) - add_logfile_handler(log_file) - args.server_config = os.path.join("config", JobConstants.SERVER_JOB_CONFIG) app_custom_folder = os.path.join(app_server_root, "custom") if os.path.isdir(app_custom_folder) and app_custom_folder not in sys.path:
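
A note on the federated-plugin changes in xgb_client_runner.py above: the resolution order is that the scalar config vars xgb_plugin_name and xgb_plugin_path override the federated_plugin dict, the plugin name falls back to cuda_paillier, and the path falls back to libs/lib<name>.so under the package root. The short Python sketch below restates that precedence outside of ConfigService; the helper name resolve_plugin_params and the example values are illustrative assumptions, not NVFlare APIs.

from pathlib import Path
from typing import Optional


def resolve_plugin_params(params: dict, name: Optional[str], path: Optional[str], package_root: Path) -> dict:
    # Work on a copy of the federated_plugin dict from the resources config.
    params = dict(params)
    if name:  # scalar xgb_plugin_name overrides the dict entry
        params["name"] = name
    if path:  # scalar xgb_plugin_path overrides the dict entry
        params["path"] = path
    if not params.get("name"):  # default plugin name
        params["name"] = "cuda_paillier"
    if not params.get("path"):  # default path: lib<name>.so under <package root>/libs (Linux naming)
        params["path"] = str(package_root / "libs" / f"lib{params['name']}.so")
    return params


# Example with hypothetical values: nothing configured, so both defaults apply.
print(resolve_plugin_params({}, None, None, Path("/opt/nvflare")))
# {'name': 'cuda_paillier', 'path': '/opt/nvflare/libs/libcuda_paillier.so'}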
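
Similarly, the allgatherv handling added to client_handler.py is easier to follow without the FLContext plumbing. The standalone sketch below shows only the core arithmetic (the function name pad_allgatherv_result and the byte counts are made up for illustration): the label client keeps the combined histogram, appends one zero buffer for each other worker, and zero-pads the payload back to the sum of the original per-rank buffer sizes, so XGBoost's check that the allgatherv size is unchanged still passes.

def pad_allgatherv_result(combined: bytes, zero_buf: bytes, size_dict: dict) -> bytes:
    # size_dict maps rank -> original buffer size that rank sent to allgatherv.
    total_size = sum(size_dict.values())
    world_size = len(size_dict)

    # One real result plus (world_size - 1) all-zero stand-ins for the other workers.
    result = combined + zero_buf * (world_size - 1)

    # Pad with zero bytes so the overall size matches what XGBoost originally sent.
    padding_size = total_size - len(result)
    if padding_size < 0:
        raise ValueError(f"original size {total_size} is not big enough for data size {len(result)}")
    return result + b"\x00" * padding_size


# Hypothetical sizes: three ranks, each originally contributing 64 bytes.
sizes = {0: 64, 1: 64, 2: 64}
padded = pad_allgatherv_result(b"\x01" * 40, b"\x00" * 8, sizes)
assert len(padded) == sum(sizes.values())  # overall size unchanged: 192 bytes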