Introduce sled-agent-config-reconciler skeleton #8063

Open · wants to merge 9 commits into base: main
35 changes: 35 additions & 0 deletions Cargo.lock

Some generated files are not rendered by default.

3 changes: 3 additions & 0 deletions Cargo.toml
@@ -124,6 +124,7 @@ members = [
"sled-agent",
"sled-agent/api",
"sled-agent/bootstrap-agent-api",
"sled-agent/config-reconciler",
"sled-agent/repo-depot-api",
"sled-agent/types",
"sled-diagnostics",
@@ -275,6 +276,7 @@ default-members = [
"sled-agent",
"sled-agent/api",
"sled-agent/bootstrap-agent-api",
"sled-agent/config-reconciler",
"sled-agent/repo-depot-api",
"sled-agent/types",
"sled-diagnostics",
@@ -662,6 +664,7 @@ similar-asserts = "1.7.0"
sled = "=0.34.7"
sled-agent-api = { path = "sled-agent/api" }
sled-agent-client = { path = "clients/sled-agent-client" }
sled-agent-config-reconciler = { path = "sled-agent/config-reconciler" }
sled-agent-types = { path = "sled-agent/types" }
sled-diagnostics = { path = "sled-diagnostics" }
sled-hardware = { path = "sled-hardware" }
1 change: 1 addition & 0 deletions sled-agent/Cargo.toml
@@ -83,6 +83,7 @@ sha2.workspace = true
sha3.workspace = true
sled-agent-api.workspace = true
sled-agent-client.workspace = true
sled-agent-config-reconciler.workspace = true
sled-agent-types.workspace = true
sled-diagnostics.workspace = true
sled-hardware.workspace = true
43 changes: 43 additions & 0 deletions sled-agent/config-reconciler/Cargo.toml
@@ -0,0 +1,43 @@
[package]
name = "sled-agent-config-reconciler"
version = "0.1.0"
edition = "2021"
license = "MPL-2.0"

[lints]
workspace = true

[dependencies]
anyhow.workspace = true
async-trait.workspace = true
camino.workspace = true
camino-tempfile.workspace = true
chrono.workspace = true
derive_more.workspace = true
dropshot.workspace = true
glob.workspace = true
id-map.workspace = true
illumos-utils.workspace = true
key-manager.workspace = true
nexus-sled-agent-shared.workspace = true
omicron-common.workspace = true
omicron-uuid-kinds.workspace = true
sled-agent-api.workspace = true
sled-agent-types.workspace = true
sled-hardware.workspace = true
sled-storage.workspace = true
slog.workspace = true
slog-error-chain.workspace = true
thiserror.workspace = true
tokio.workspace = true
tufaceous-artifact.workspace = true
zone.workspace = true
omicron-workspace-hack.workspace = true

[dev-dependencies]
omicron-test-utils.workspace = true
proptest.workspace = true
test-strategy.workspace = true

[features]
testing = []
162 changes: 162 additions & 0 deletions sled-agent/config-reconciler/src/dataset_serialization_task.rs
@@ -0,0 +1,162 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.

//! Many of the ZFS operations sled-agent performs are not atomic, because they
//! involve multiple lower-level ZFS operations. This module implements a tokio
//! task that serializes a set of operations to ensure that no two of them can
//! execute concurrently.
//!
//! It uses the common pattern of "a task with an mpsc channel to send requests,
//! using oneshot channels to send responses".

use camino::Utf8PathBuf;
use id_map::IdMap;
use id_map::IdMappable;
use nexus_sled_agent_shared::inventory::InventoryDataset;
use omicron_common::disk::DatasetConfig;
use omicron_common::zpool_name::ZpoolName;
use omicron_uuid_kinds::DatasetUuid;
use sled_storage::config::MountConfig;
use sled_storage::manager::NestedDatasetConfig;
use sled_storage::manager::NestedDatasetListOptions;
use sled_storage::manager::NestedDatasetLocation;
use slog::Logger;
use slog::warn;
use std::collections::BTreeSet;
use std::sync::Arc;
use tokio::sync::mpsc;

#[derive(Debug, thiserror::Error)]
pub enum DatasetTaskError {
    #[error("cannot perform dataset operations: waiting for key manager")]
    WaitingForKeyManager,
    #[error("dataset task busy; cannot service new requests")]
    Busy,
    #[error("internal error: dataset task exited!")]
    Exited,
}

#[derive(Debug)]
pub(crate) struct DatasetEnsureResult(IdMap<SingleDatasetEnsureResult>);

#[derive(Debug, Clone)]
struct SingleDatasetEnsureResult {
    config: DatasetConfig,
    state: DatasetState,
}

impl IdMappable for SingleDatasetEnsureResult {
    type Id = DatasetUuid;

    fn id(&self) -> Self::Id {
        self.config.id
    }
}

#[derive(Debug, Clone)]
enum DatasetState {
    Mounted,
    FailedToMount, // TODO add error
    UuidMismatch(DatasetUuid),
    ZpoolNotFound,
    ParentMissingFromConfig,
    ParentFailedToMount,
}

#[derive(Debug)]
pub(crate) struct DatasetTaskHandle(mpsc::Sender<DatasetTaskRequest>);

impl DatasetTaskHandle {
    pub fn spawn_dataset_task(
        mount_config: Arc<MountConfig>,
        base_log: &Logger,
    ) -> Self {
        // We don't expect too many concurrent requests to this task, and want
        // to detect "the task is wedged" pretty quickly. Common operations:
        //
        // 1. Reconciler wants to ensure datasets (at most 1 at a time)
        // 2. Inventory requests from Nexus (likely at most 3 at a time)
        // 3. Support bundle operations (unlikely to be multiple concurrently)
        //
        // so we'll pick a number that allows all of those plus a little
        // overhead.
        let (request_tx, request_rx) = mpsc::channel(16);

        tokio::spawn(
            DatasetTask {
                mount_config,
                request_rx,
                log: base_log.new(slog::o!("component" => "DatasetTask")),
            }
            .run(),
        );

        Self(request_tx)
    }

    pub async fn datasets_ensure(
        &self,
        _dataset_configs: IdMap<DatasetConfig>,
        _zpools: BTreeSet<ZpoolName>,
    ) -> Result<DatasetEnsureResult, DatasetTaskError> {
        unimplemented!()
    }

    pub async fn inventory(
        &self,
        _zpools: BTreeSet<ZpoolName>,
    ) -> Result<Vec<InventoryDataset>, DatasetTaskError> {
        unimplemented!()
    }

Collaborator (on `inventory`):

You can punt on this question if it's answered in a follow-up PR, but with no rustdocs on a pub method, I figured I'd ask: why is the zpools argument being passed here?

Contributor (Author):

Yeah, this is a little messy. It's because the implementation of this is basically

    let datasets_of_interest = [
        // We care about the zpool itself, and all direct children.
        zpool.to_string(),
        // Likewise, we care about the encrypted dataset, and all
        // direct children.
        format!("{zpool}/{CRYPT_DATASET}"),
        // The zone dataset gives us additional context on "what zones
        // have datasets provisioned".
        format!("{zpool}/{ZONE_DATASET}"),
    ];
    info!(log, "Listing datasets within zpool"; "zpool" => zpool.to_string());
    Zfs::get_dataset_properties(
        datasets_of_interest.as_slice(),
        WhichDatasets::SelfAndChildren,
    )
    .map_err(Error::Other)

which needs to know in which zpools we're looking for datasets to report.

FWIW, I think this method isn't really pub, because the type it's on isn't directly exposed outside the crate. But it's still a good question! Once the rest of the work starts to land I wonder if there's a way to not need to pass this argument.


    pub async fn nested_dataset_ensure_mounted(
        &self,
        _dataset: NestedDatasetLocation,
    ) -> Result<Utf8PathBuf, DatasetTaskError> {
        unimplemented!()
    }

    pub async fn nested_dataset_ensure(
        &self,
        _config: NestedDatasetConfig,
    ) -> Result<(), DatasetTaskError> {
        unimplemented!()
    }

    pub async fn nested_dataset_destroy(
        &self,
        _name: NestedDatasetLocation,
    ) -> Result<(), DatasetTaskError> {
        unimplemented!()
    }

    pub async fn nested_dataset_list(
        &self,
        _name: NestedDatasetLocation,
        _options: NestedDatasetListOptions,
    ) -> Result<Vec<NestedDatasetConfig>, DatasetTaskError> {
        unimplemented!()
    }
}

struct DatasetTask {
    mount_config: Arc<MountConfig>,
    request_rx: mpsc::Receiver<DatasetTaskRequest>,
    log: Logger,
}

impl DatasetTask {
    async fn run(mut self) {
        while let Some(req) = self.request_rx.recv().await {
            self.handle_request(req).await;
        }
        warn!(self.log, "all request handles closed; exiting dataset task");
    }

    async fn handle_request(&mut self, _req: DatasetTaskRequest) {
        unimplemented!()
    }
}

enum DatasetTaskRequest {}
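
For readers unfamiliar with the pattern the module docs describe ("a task with an mpsc channel to send requests, using oneshot channels to send responses"), here is a minimal sketch of how the empty DatasetTaskRequest enum and a handle method might eventually fit together. All names and payloads below are illustrative only, not the shapes this PR or its follow-ups will necessarily use:

// Sketch only: illustrative types, not the PR's final request shapes.
use tokio::sync::{mpsc, oneshot};

#[derive(Debug)]
struct SketchError;

// Each request variant carries a oneshot sender that the task uses to reply.
enum SketchRequest {
    DatasetsEnsure { tx: oneshot::Sender<Result<(), SketchError>> },
}

struct SketchHandle(mpsc::Sender<SketchRequest>);

impl SketchHandle {
    async fn datasets_ensure(&self) -> Result<(), SketchError> {
        let (tx, rx) = oneshot::channel();
        // `try_send` maps a full queue to an error instead of waiting, which
        // is how a handle can surface something like `Busy`.
        self.0
            .try_send(SketchRequest::DatasetsEnsure { tx })
            .map_err(|_| SketchError)?;
        // If the task has exited, the oneshot sender is dropped and the
        // receive fails, analogous to `Exited`.
        rx.await.map_err(|_| SketchError)?
    }
}

async fn run_sketch_task(mut request_rx: mpsc::Receiver<SketchRequest>) {
    // Requests are handled one at a time, which is what serializes the
    // underlying (non-atomic) ZFS operations.
    while let Some(req) = request_rx.recv().await {
        match req {
            SketchRequest::DatasetsEnsure { tx } => {
                // Do the serialized work here, then reply on the oneshot.
                let _ = tx.send(Ok(()));
            }
        }
    }
}

This mirrors the handle-spawns-task structure already present in spawn_dataset_task; the piece the skeleton intentionally leaves out is the populated request enum and the per-request oneshot replies.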