Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions kbs/src/plugins/implementations/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -6,12 +6,14 @@
pub mod nebula_ca;
#[cfg(feature = "pkcs11")]
pub mod pkcs11;
pub mod provisioner;
pub mod resource;
pub mod sample;

#[cfg(feature = "nebula-ca-plugin")]
pub use nebula_ca::{NebulaCaPlugin, NebulaCaPluginConfig};
#[cfg(feature = "pkcs11")]
pub use pkcs11::{Pkcs11Backend, Pkcs11Config};
pub use provisioner::{Provisioner, ProvisionerConfig};
pub use resource::{RepositoryConfig, ResourceStorage};
pub use sample::{Sample, SampleConfig};
265 changes: 265 additions & 0 deletions kbs/src/plugins/implementations/provisioner.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,265 @@
// SPDX-License-Identifier: Apache-2.0

//! Provisioner plugin for KBS.
//!
//! Generates per-VM LUKS encryption keys and stores them in the KBS resource
//! storage so that attested guests can retrieve them via the standard
//! `/kbs/v0/resource/...` path.
//!
//! The hook sidecar calls `POST /kbs/v0/provisioner/provision` before the VM
//! boots and receives `{uuid, oemstring, mrconfigid}` to inject into SMBIOS.
//! On boot the guest attests and fetches the key through the `resource` plugin.

use std::collections::HashMap;

use actix_web::http::Method;
use anyhow::{anyhow, bail, Result};
use base64::{engine::general_purpose::STANDARD as B64, Engine};
use key_value_storage::{KeyValueStorageInstance, SetParameters, StorageBackendConfig};
use rand::Rng;
use serde::{Deserialize, Serialize};
use sha2::{Digest, Sha384};
use uuid::Uuid;

use super::resource::RESOURCE_STORAGE_NAMESPACE;

// Config (deserialized from kbs-config.toml)
Comment thread
MatiasVara marked this conversation as resolved.
/// Configuration for the `provisioner` plugin, deserialized from the
/// plugin's section of `kbs-config.toml`.
#[derive(Deserialize, Clone, Debug, PartialEq)]
pub struct ProvisionerConfig {
    /// URL that will be embedded in `initdata.toml` for the guest.
    pub kbs_url: String,

    /// Length of the random LUKS key in bytes (default 32).
    #[serde(default = "default_key_length")]
    pub key_length: usize,
}

/// Serde default for [`ProvisionerConfig::key_length`].
fn default_key_length() -> usize {
    32
}

pub struct Provisioner {
storage: KeyValueStorageInstance,
kbs_url: String,
key_length: usize,
// TODO: This in-memory cache grows unboundedly and uses std::sync::Mutex
// which can block the async runtime. More critically, the cache does
// not survive KBS restarts: if the sidecar re-provisions the same VM
// after a restart, a new UUID/key pair is generated, replacing the
// original resource. The VM's LUKS volume would then fail to unlock
// because the key no longer matches. The cache (or the vm->resource
// mapping) must be persisted to the storage backend so it can be
// restored on startup.
Comment thread
MatiasVara marked this conversation as resolved.
cache: tokio::sync::RwLock<HashMap<String, ProvisionResponse>>,
}

/// JSON body of `POST /kbs/v0/provisioner/provision`, sent by the hook
/// sidecar before the VM boots.
#[derive(Serialize, Deserialize, Clone)]
struct ProvisionRequest {
    /// Name of the VM being provisioned; combined with `namespace` to form
    /// the cache key.
    vm_name: String,
    /// Namespace of the VM (presumably its cluster namespace — only used as
    /// part of the cache key here). Defaults to `"default"` when omitted.
    #[serde(default = "default_namespace")]
    namespace: String,
}

/// Serde default for [`ProvisionRequest::namespace`].
fn default_namespace() -> String {
    "default".into()
}

/// JSON response returned to the sidecar by the provision endpoint (also the
/// value cached per VM).
#[derive(Serialize, Clone)]
struct ProvisionResponse {
    /// Randomly generated UUID identifying this provisioning.
    uuid: String,
    /// Base64-encoded `initdata.toml`, injected into SMBIOS as an OEM string.
    oemstring: String,
    /// Base64-encoded SHA-384 digest of `initdata.toml`.
    mrconfigid: String,
    /// Resource storage path (`default/{uuid}/root`) holding the LUKS key.
    resource_path: String,
}

/// Minimal `{"status": "..."}` JSON body returned by the deprovision endpoint.
#[derive(Serialize)]
struct StatusResponse {
    status: String,
}

impl Provisioner {
pub async fn new(
config: ProvisionerConfig,
storage_backend_config: &StorageBackendConfig,
) -> Result<Self> {
let storage = storage_backend_config
.backends
.to_client_with_namespace(
storage_backend_config.storage_type,
RESOURCE_STORAGE_NAMESPACE,
)
.await
.map_err(|e| anyhow!("Provisioner: failed to init storage backend: {e}"))?;

Ok(Self {
storage,
kbs_url: config.kbs_url,
key_length: config.key_length,
cache:tokio::sync::RwLock::new(HashMap::new()),
})
}
}

impl Provisioner {
// TODO: This generates an alphanumeric string (~5.95 bits of entropy per
// char, ~190 bits for 32 chars). Use OsRng with raw bytes + base64/hex
// encoding to get a full 256-bit key.
fn generate_random_key(&self) -> String {
const CHARSET: &[u8] = b"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789";
let mut rng = rand::thread_rng();
(0..self.key_length)
.map(|_| {
let idx = rng.gen_range(0..CHARSET.len());
CHARSET[idx] as char
})
.collect()
Comment thread
MatiasVara marked this conversation as resolved.
}

// This init-data is not meant to be compatible with CoCo.
// See https://gitlab.com/berrange/cvminjector#initial-data-format
Comment thread
MatiasVara marked this conversation as resolved.
fn generate_initdata_toml(&self, resource_path: &str) -> String {
format!(
"algorithm = \"sha384\"\n\
version = \"0.1.0\"\n\
\n\
[data]\n\
\"trustee.kbs.url\" = \"{}\"\n\
\"trustee.kbs.resource\" = \"{}\"\n",
self.kbs_url, resource_path
Comment thread
MatiasVara marked this conversation as resolved.
)
}

// See https://gitlab.com/berrange/cvminjector#confidential-data-format
fn generate_confdata_toml(luks_key: &str) -> String {
format!(
"version = \"0.1.0\"\n\
\n\
[data]\n\
\"io.cryptsetup.key.text.root\" = \"{}\"\n",
luks_key
)
}

async fn handle_provision(&self, body: &[u8]) -> Result<Vec<u8>> {
let req: ProvisionRequest = serde_json::from_slice(body)
.map_err(|e| anyhow!("invalid JSON body: {e}"))?;

let cache_key = format!("{}/{}", req.namespace, req.vm_name);

// Return cached result if already provisioned
if let Some(cached) = self.cache.read().await.get(&cache_key) {
return Ok(serde_json::to_vec(cached)?);
}

// TODO: UUIDv4 is random but not tied to VM identity. A more robust
// approach would derive the UUID deterministically from the VM's
// attributes (e.g. name + namespace + cluster ID) to ensure
// idempotency and traceability across re-provisions.
// A uniqueness check against existing storage entries should also
// be added to avoid collisions before writing the resource.
let trustee_uuid = Uuid::new_v4().to_string();
Comment thread
MatiasVara marked this conversation as resolved.
let resource_path = format!("default/{trustee_uuid}/root");
let luks_key = self.generate_random_key();

let initdata_toml = self.generate_initdata_toml(&resource_path);
let confdata_toml = Self::generate_confdata_toml(&luks_key);

let oemstring = B64.encode(initdata_toml.as_bytes());
let mrconfigid = {
let digest = Sha384::digest(initdata_toml.as_bytes());
B64.encode(digest)
};

// Write the confdata (LUKS key) to storage via the same backend
// that the `resource` plugin reads from.
// TODO: Using overwrite: true could silently replace another VM's key
// on UUID collision. Use overwrite: false and handle the conflict error,
// or check existence before writing.
Copy link
Copy Markdown
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yeah probably should not do this. Instead, you can introduce another endpoint to this plugin for serving resources.

Copy link
Copy Markdown
Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Can you elaborate?

self.storage
.set(
&resource_path,
confdata_toml.as_bytes(),
SetParameters { overwrite: false },
)
.await
.map_err(|e| anyhow!("failed to write resource: {e}"))?;

// NOTE: The sidecar currently only consumes `oemstring` and `mrconfigid`.
// `uuid` and `resource_path` are included for debugging/deprovision but
// are redundant for the sidecar since `oemstring` (base64 of initdata.toml)
// already embeds the resource_path.
let response = ProvisionResponse {
uuid: trustee_uuid,
oemstring,
mrconfigid,
resource_path,
};

self.cache.write().await.insert(cache_key, response.clone());

Ok(serde_json::to_vec(&response)?)
}

async fn handle_deprovision(&self, path: &[&str]) -> Result<Vec<u8>> {
let trustee_uuid = path.first().ok_or_else(|| anyhow!("missing uuid in path"))?;
let resource_path = format!("default/{trustee_uuid}/root");

let _ = self.storage.delete(&resource_path).await;

// Remove from cache
self.cache.write().await.retain(|_, v| v.uuid != *trustee_uuid);

Ok(serde_json::to_vec(&StatusResponse {
status: "deleted".into(),
})?)
}
}

#[async_trait::async_trait]
impl super::super::plugin_manager::ClientPlugin for Provisioner {
    /// Route requests under `/kbs/v0/provisioner/...` to the handlers above.
    async fn handle(
        &self,
        body: &[u8],
        _query: &HashMap<String, String>,
        path: &[&str],
        method: &Method,
    ) -> Result<Vec<u8>> {
        let endpoint = path.first().copied();
        match method.as_str() {
            "POST" if endpoint == Some("provision") => self.handle_provision(body).await,
            "DELETE" if endpoint == Some("provision") => {
                // Remaining path segments carry the UUID to deprovision.
                self.handle_deprovision(&path[1..]).await
            }
            _ => bail!("unsupported: {} /kbs/v0/provisioner/{}", method, path.join("/")),
        }
    }

    async fn validate_auth(
        &self,
        _body: &[u8],
        _query: &HashMap<String, String>,
        _path: &[&str],
        _method: &Method,
    ) -> Result<bool> {
        // Return true so KBS routes through the admin auth path
        // (instead of the attestation token path which requires a TEE session).
        //
        // NOTE: Currently relies on InsecureAllowAll admin backend for dev.
        // For production, switch to Simple admin with JWT-signed requests
        // and scoped roles, e.g.:
        // [admin] type = "Simple"
        // [[admin.personas]] id = "provisioner" public_key_path = "..."
        // [[admin.roles]] id = "provisioner" allowed_endpoints = "^/kbs/v0/provisioner/.*$"
        Ok(true)
    }

    async fn encrypted(
        &self,
        _body: &[u8],
        _query: &HashMap<String, String>,
        _path: &[&str],
        _method: &Method,
    ) -> Result<bool> {
        // Responses don't need TEE encryption (caller is infrastructure, not a TEE).
        Ok(false)
    }
}
12 changes: 12 additions & 0 deletions kbs/src/plugins/plugin_manager.rs
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,8 @@ use serde::Deserialize;

use super::{sample, RepositoryConfig, ResourceStorage};

use super::{Provisioner, ProvisionerConfig};

#[cfg(feature = "nebula-ca-plugin")]
use super::{NebulaCaPlugin, NebulaCaPluginConfig};

Expand Down Expand Up @@ -74,6 +76,9 @@ pub enum PluginsConfig {
#[cfg(feature = "pkcs11")]
#[serde(alias = "pkcs11")]
Pkcs11(Pkcs11Config),

#[serde(alias = "provisioner")]
Provisioner(ProvisionerConfig),
}

impl Display for PluginsConfig {
Expand All @@ -85,6 +90,7 @@ impl Display for PluginsConfig {
PluginsConfig::NebulaCaPlugin(_) => f.write_str("nebula-ca"),
#[cfg(feature = "pkcs11")]
PluginsConfig::Pkcs11(_) => f.write_str("pkcs11"),
PluginsConfig::Provisioner(_) => f.write_str("provisioner"),
}
}
}
Expand Down Expand Up @@ -121,6 +127,12 @@ impl PluginsConfig {
.context("Initialize 'pkcs11' plugin failed")?;
Arc::new(pkcs11) as _
}
PluginsConfig::Provisioner(cfg) => {
let prov = Provisioner::new(cfg, storage_backend_config)
.await
.context("Initialize 'Provisioner' plugin failed")?;
Arc::new(prov) as _
}
};

Ok(plugin)
Expand Down