diff --git a/build.zig.zon b/build.zig.zon index a31755f0e..f1e7cef2a 100644 --- a/build.zig.zon +++ b/build.zig.zon @@ -102,7 +102,7 @@ }, .spec_test_version = .{ .type = .string, - .default = "v1.6.0-beta.2", + .default = "v1.7.0-alpha.5", .description = "Consensus spec tests version tag.", }, .spec_test_out_dir = .{ @@ -378,6 +378,7 @@ .bls, .persistent_merkle_tree, .hex, + .fork_choice, }, }, }, diff --git a/src/fork_choice/fork_choice.zig b/src/fork_choice/fork_choice.zig index 86cf26c4a..b8e85986e 100644 --- a/src/fork_choice/fork_choice.zig +++ b/src/fork_choice/fork_choice.zig @@ -390,6 +390,7 @@ pub const ForkChoice = struct { pub fn onBlock( self: *ForkChoice, allocator: Allocator, + io: std.Io, block: *const AnyBeaconBlock, state: *CachedBeaconState, block_delay_sec: u32, @@ -403,6 +404,7 @@ pub const ForkChoice = struct { inline else => |fork| try self.onBlockInner( fork, allocator, + io, block.castToFork(.full, fork), state, block_delay_sec, @@ -424,6 +426,7 @@ pub const ForkChoice = struct { self: *ForkChoice, comptime fork: ForkSeq, allocator: Allocator, + io: std.Io, block: *const BeaconBlock(.full, fork), state: *CachedBeaconState, block_delay_sec: u32, @@ -471,6 +474,9 @@ pub const ForkChoice = struct { // (before attesting interval = before 1st interval). const is_timely = self.isBlockTimely(slot, block_delay_sec); // Only boost the first block we see. + // TODO GLOAS: v1.7.0-alpha.1 added proposer index check in update_proposer_boost_root + // (block.proposer_index == get_beacon_proposer_index(head_state)). + // Not yet implemented — matches Lodestar TS unstable. if (self.opts.proposer_boost and is_timely and self.proposer_boost_root == null) { self.proposer_boost_root = block_root; } @@ -494,6 +500,7 @@ pub const ForkChoice = struct { // 8. Update realized checkpoints. 
var realized_ctx = OnBlockBalancesCtx{ + .allocator = allocator, .getter = self.fc_store.justified_balances_getter, .checkpoint = justified_checkpoint, .state = state, @@ -530,7 +537,7 @@ pub const ForkChoice = struct { }; } else { // Compute new, happens ~2/3 first blocks of epoch as monitored in mainnet. - const unrealized = try state_transition.computeUnrealizedCheckpoints(state, allocator); + const unrealized = try state_transition.computeUnrealizedCheckpoints(allocator, io, state); unrealized_justified_checkpoint = .{ .epoch = unrealized.justified_checkpoint.epoch, .root = unrealized.justified_checkpoint.root, @@ -547,6 +554,7 @@ pub const ForkChoice = struct { // Update best known unrealized justified & finalized checkpoints. var unrealized_balances_ctx = OnBlockBalancesCtx{ + .allocator = allocator, .getter = self.fc_store.justified_balances_getter, .checkpoint = unrealized_justified_checkpoint, .state = state, @@ -560,6 +568,7 @@ pub const ForkChoice = struct { // checkpoints right away. if (block_epoch < computeEpochAtSlot(current_slot)) { var past_epoch_ctx = OnBlockBalancesCtx{ + .allocator = allocator, .getter = self.fc_store.justified_balances_getter, .checkpoint = unrealized_justified_checkpoint, .state = state, @@ -1334,6 +1343,7 @@ pub const ForkChoice = struct { /// Closure context for `onBlock` path: calls `getter.get(checkpoint, state)` → wraps in RC. 
const OnBlockBalancesCtx = struct { + allocator: Allocator, getter: store.JustifiedBalancesGetter, checkpoint: Checkpoint, state: *CachedBeaconState, @@ -1341,7 +1351,7 @@ pub const ForkChoice = struct { fn call(ctx: ?*anyopaque) error{OutOfMemory}!*EffectiveBalanceIncrementsRc { const self: *OnBlockBalancesCtx = @ptrCast(@alignCast(ctx.?)); const balances = self.getter.get(self.checkpoint, self.state); - return EffectiveBalanceIncrementsRc.init(balances.allocator, balances); + return EffectiveBalanceIncrementsRc.init(self.allocator, balances); } }; diff --git a/test/spec/root.zig b/test/spec/root.zig index f1f763166..adb65968e 100644 --- a/test/spec/root.zig +++ b/test/spec/root.zig @@ -10,6 +10,7 @@ comptime { testing.refAllDecls(@import("./test_case/sanity_tests.zig")); testing.refAllDecls(@import("./test_case/epoch_processing_tests.zig")); testing.refAllDecls(@import("./test_case/fork_tests.zig")); + testing.refAllDecls(@import("./test_case/fork_choice_tests.zig")); testing.refAllDecls(@import("./test_case/transition_tests.zig")); testing.refAllDecls(@import("./test_case/random_tests.zig")); testing.refAllDecls(@import("./test_case/finality_tests.zig")); diff --git a/test/spec/runner/fork_choice.zig b/test/spec/runner/fork_choice.zig new file mode 100644 index 000000000..a0bd3c717 --- /dev/null +++ b/test/spec/runner/fork_choice.zig @@ -0,0 +1,1206 @@ +const std = @import("std"); +const Allocator = std.mem.Allocator; + +const ssz = @import("consensus_types"); +const primitives = ssz.primitive; +const Slot = primitives.Slot.Type; +const Epoch = primitives.Epoch.Type; +const Root = primitives.Root.Type; + +const config_mod = @import("config"); +const ForkSeq = config_mod.ForkSeq; +const BeaconConfig = config_mod.BeaconConfig; + +const state_transition = @import("state_transition"); +const CachedBeaconState = state_transition.CachedBeaconState; +const TestCachedBeaconState = state_transition.test_utils.TestCachedBeaconState; + +const fork_choice_mod = 
@import("fork_choice"); +const ForkChoice = fork_choice_mod.ForkChoice; +const ForkChoiceStore = fork_choice_mod.ForkChoiceStore; +const ProtoArray = fork_choice_mod.ProtoArray; +const ProtoBlock = fork_choice_mod.ProtoBlock; +const ExecutionStatus = fork_choice_mod.ExecutionStatus; +const DataAvailabilityStatus = fork_choice_mod.DataAvailabilityStatus; +const Checkpoint = fork_choice_mod.Checkpoint; +const SszCheckpoint = ssz.phase0.Checkpoint.Type; +const ForkChoiceOpts = fork_choice_mod.ForkChoiceOpts; +const JustifiedBalancesGetter = fork_choice_mod.JustifiedBalancesGetter; +const JustifiedBalances = fork_choice_mod.JustifiedBalances; +const LVHExecResponse = fork_choice_mod.LVHExecResponse; +const BlockExtraMeta = fork_choice_mod.BlockExtraMeta; + +const fork_types = @import("fork_types"); +const AnyBeaconBlock = fork_types.AnyBeaconBlock; +const AnySignedBeaconBlock = fork_types.AnySignedBeaconBlock; +const AnyBeaconState = fork_types.AnyBeaconState; +const AnyIndexedAttestation = fork_types.AnyIndexedAttestation; +const AnyAttesterSlashing = fork_types.AnyAttesterSlashing; + +const Node = @import("persistent_merkle_tree").Node; + +const ZERO_HASH: Root = [_]u8{0} ** 32; + +const test_case = @import("../test_case.zig"); +const loadSszSnappyValue = test_case.loadSszSnappyValue; +const getSszSnappyDecompressedSize = test_case.getSszSnappyDecompressedSize; +const loadSignedBeaconBlock = test_case.loadSignedBeaconBlock; +const deinitSignedBeaconBlock = test_case.deinitSignedBeaconBlock; +const loadBeaconBlock = test_case.loadBeaconBlock; +const deinitBeaconBlock = test_case.deinitBeaconBlock; +const TestCaseUtils = test_case.TestCaseUtils; + +// ── YAML Step Types ── + +const Step = union(enum) { + tick: u64, + block: BlockStep, + attestation: []const u8, + attester_slashing: []const u8, + pow_block: []const u8, + payload_status: PayloadStatusStep, + checks: Checks, +}; + +const BlockStep = struct { + name: []const u8, + valid: bool = true, + blobs: ?[]const u8 = 
null, + blobs_count: usize = 0, + proofs_count: usize = 0, +}; + +const PayloadStatusStep = struct { + block_hash: Root, + status: enum { valid, invalid, syncing, accepted }, + latest_valid_hash: ?Root = null, +}; + +const Checks = struct { + genesis_time: ?u64 = null, + time: ?u64 = null, + head: ?CheckHead = null, + justified_checkpoint: ?CheckCheckpoint = null, + finalized_checkpoint: ?CheckCheckpoint = null, + proposer_boost_root: ?Root = null, + get_proposer_head: ?Root = null, + should_override_forkchoice_update: ?ShouldOverrideFCU = null, +}; + +const CheckHead = struct { + slot: Slot, + root: Root, +}; + +const CheckCheckpoint = struct { + epoch: Epoch, + root: Root, +}; + +const ShouldOverrideFCU = struct { + validator_is_connected: bool = true, + result: bool, +}; + +// ── TestCase ── + +pub fn TestCase(comptime fork: ForkSeq) type { + const tc_utils = TestCaseUtils(fork); + + return struct { + allocator: Allocator, + pool: *Node.Pool, + + // Anchor state (provides BeaconConfig + immutable data for state_transition) + anchor_state: TestCachedBeaconState, + + // Fork choice components (heap-allocated for stable pointers) + fc: *ForkChoice, + fc_store: *ForkChoiceStore, + proto_array: *ProtoArray, + + // State cache: maps block_root → post_state for proper fork handling. + // Blocks may reference different parents (forking), so each block's state + // transition must start from the correct parent's post-state. + state_cache: std.AutoHashMap(Root, *CachedBeaconState), + + // Pending payload statuses: maps execution_payload.block_hash → status. + // In TS, payload_status steps pre-load the execution engine mock; the stored status + // is consumed during subsequent processBlock calls. We replicate this by storing + // statuses and looking them up when processing blocks. 
+ pending_payload_statuses: std.AutoHashMap(Root, PayloadStatusStep), + + // Root of the anchor block (used as fallback parent state) + anchor_block_root: Root, + + // Tick time in seconds (from steps.yaml tick values) + tick_time: u64 = 0, + + // Parsed steps + steps: []Step, + + // Test directory for loading SSZ files on demand + test_dir: std.Io.Dir, + + const Self = @This(); + + pub fn execute(allocator: Allocator, pool: *Node.Pool, dir: std.Io.Dir) !void { + var tc = Self.init(allocator, pool, dir) catch |err| { + if (err == error.SkipZigTest) return err; + std.debug.print("fork_choice init error: {s}\n", .{@errorName(err)}); + return err; + }; + defer tc.deinit(); + + tc.runSteps() catch |err| { + std.debug.print("fork_choice step error: {s}\n", .{@errorName(err)}); + return err; + }; + } + + fn init(allocator: Allocator, pool: *Node.Pool, dir: std.Io.Dir) !Self { + // 1. Load anchor state + var anchor_state = try tc_utils.loadPreStateFromFile(allocator, pool, dir, "anchor_state.ssz_snappy"); + errdefer anchor_state.deinit(); + + // 2. Load anchor block and compute block_root + const anchor_block = try loadBeaconBlock(allocator, fork, dir, "anchor_block.ssz_snappy"); + defer deinitBeaconBlock(anchor_block, allocator); + + var block_root: Root = undefined; + try anchor_block.hashTreeRoot(allocator, &block_root); + + const anchor_slot = anchor_block.slot(); + const parent_root = anchor_block.parentRoot().*; + const state_root = anchor_block.stateRoot().*; + + // 3. Get justified/finalized checkpoints from anchor state + // + // Note: The anchor checkpoint root must match the block_root stored in ProtoArray. + // For genesis, the state's currentJustifiedCheckpoint has root=0x00..00, but + // ProtoArray stores the actual block hash. Following the Lodestar TS pattern: + // use the computed block_root as the checkpoint root (matching computeAnchorCheckpoint). 
+ const anchor_cached = anchor_state.cached_state; + var justified_cp_val: SszCheckpoint = undefined; + try anchor_cached.state.currentJustifiedCheckpoint(&justified_cp_val); + var finalized_cp_val: SszCheckpoint = undefined; + try anchor_cached.state.finalizedCheckpoint(&finalized_cp_val); + + // Override checkpoint roots with the anchor block root + // (mirrors TS computeAnchorCheckpoint which uses BeaconBlockHeader hash) + justified_cp_val.root = block_root; + finalized_cp_val.root = block_root; + + const justified_cp = Checkpoint{ + .epoch = justified_cp_val.epoch, + .root = justified_cp_val.root, + }; + const finalized_cp = Checkpoint{ + .epoch = finalized_cp_val.epoch, + .root = finalized_cp_val.root, + }; + + // 4. Build anchor ProtoBlock + const anchor_proto_block = ProtoBlock{ + .slot = anchor_slot, + .block_root = block_root, + .parent_root = parent_root, + .state_root = state_root, + .target_root = block_root, // ProtoArray.initialize sets this + .justified_epoch = justified_cp_val.epoch, + .justified_root = justified_cp_val.root, + .finalized_epoch = finalized_cp_val.epoch, + .finalized_root = finalized_cp_val.root, + .unrealized_justified_epoch = justified_cp_val.epoch, + .unrealized_justified_root = justified_cp_val.root, + .unrealized_finalized_epoch = finalized_cp_val.epoch, + .unrealized_finalized_root = finalized_cp_val.root, + .extra_meta = .pre_merge, + .timeliness = false, + }; + + // 5. Initialize ProtoArray + const proto_array = try allocator.create(ProtoArray); + errdefer allocator.destroy(proto_array); + proto_array.* = undefined; + try proto_array.initialize(allocator, anchor_proto_block, anchor_slot); + errdefer proto_array.deinit(allocator); + + // 6. Compute justified balances + var justified_balances = try state_transition.getEffectiveBalanceIncrementsZeroInactive(allocator, anchor_cached); + defer justified_balances.deinit(allocator); + + // 7. 
Initialize ForkChoiceStore + const fc_store = try allocator.create(ForkChoiceStore); + errdefer allocator.destroy(fc_store); + try fc_store.init( + allocator, + anchor_slot, + justified_cp, + finalized_cp, + justified_balances.items, + JustifiedBalancesGetter{ + .getFn = specTestBalancesGetter, + }, + .{}, + ); + errdefer fc_store.deinit(allocator); + + // 8. Initialize ForkChoice + const fc = try allocator.create(ForkChoice); + errdefer allocator.destroy(fc); + try fc.init( + allocator, + anchor_state.config, + fc_store, + proto_array, + @intCast(justified_balances.items.len), + .{ + .proposer_boost = true, + .proposer_boost_reorg = true, + .compute_unrealized = true, + }, + ); + errdefer fc.deinit(allocator); + + // 9. Parse steps.yaml + const steps = try parseSteps(allocator, dir); + + return Self{ + .allocator = allocator, + .pool = pool, + .anchor_state = anchor_state, + .fc = fc, + .fc_store = fc_store, + .proto_array = proto_array, + .state_cache = std.AutoHashMap(Root, *CachedBeaconState).init(allocator), + .pending_payload_statuses = std.AutoHashMap(Root, PayloadStatusStep).init(allocator), + .anchor_block_root = block_root, + .tick_time = 0, + .steps = steps, + .test_dir = dir, + }; + } + + fn deinit(self: *Self) void { + freeSteps(self.allocator, self.steps); + // Free all cached post-states + var it = self.state_cache.iterator(); + while (it.next()) |entry| { + entry.value_ptr.*.deinit(); + self.allocator.destroy(entry.value_ptr.*); + } + self.state_cache.deinit(); + self.pending_payload_statuses.deinit(); + self.fc.deinit(self.allocator); + self.allocator.destroy(self.fc); + self.fc_store.deinit(self.allocator); + self.allocator.destroy(self.fc_store); + self.proto_array.deinit(self.allocator); + self.allocator.destroy(self.proto_array); + self.anchor_state.deinit(); + } + + fn runSteps(self: *Self) !void { + for (self.steps, 0..) 
|step, step_idx| { + _ = step_idx; + switch (step) { + .tick => |t| try self.handleTick(t), + .block => |b| try self.handleBlock(b), + .attestation => |a| try self.handleAttestation(a), + .attester_slashing => |s| try self.handleAttesterSlashing(s), + .pow_block => { + // pow_block steps are not currently supported (bellatrix on_merge_block only). + // Skip for now — the block step with valid:false will catch expected failures. + }, + .payload_status => |ps| self.handlePayloadStatus(ps), + .checks => |c| try self.handleChecks(c), + } + } + } + + fn handleTick(self: *Self, time: u64) !void { + self.tick_time = time; + const seconds_per_slot = self.anchor_state.config.chain.SECONDS_PER_SLOT; + const current_slot: Slot = @intCast(time / seconds_per_slot); + try self.fc.updateTime(self.allocator, current_slot); + } + + fn handleBlock(self: *Self, block_step: BlockStep) !void { + const file_name = try std.fmt.allocPrint(self.allocator, "{s}.ssz_snappy", .{block_step.name}); + defer self.allocator.free(file_name); + + const signed_block = try loadSignedBeaconBlock(self.allocator, fork, self.test_dir, file_name); + defer deinitSignedBeaconBlock(signed_block, self.allocator); + + const beacon_block = signed_block.beaconBlock(); + + // Resolve blobs_count from the SSZ blobs file if present. + // TS loads the blobs SSZ file (ssz.deneb.Blobs = List(Blob, limit)) and checks .length. + // Each Blob is a fixed-size ByteVector, so blob_count = decompressed_size / BYTES_PER_BLOB. 
+ var resolved_step = block_step; + if (comptime fork.gte(.deneb) and !fork.gte(.fulu)) { + if (block_step.blobs) |blobs_name| { + const blobs_file_name = try std.fmt.allocPrint(self.allocator, "{s}.ssz_snappy", .{blobs_name}); + defer self.allocator.free(blobs_file_name); + const BYTES_PER_BLOB = 4096 * 32; // FIELD_ELEMENTS_PER_BLOB * BYTES_PER_FIELD_ELEMENT + const decompressed_size = try getSszSnappyDecompressedSize(self.allocator, self.test_dir, blobs_file_name); + resolved_step.blobs_count = decompressed_size / BYTES_PER_BLOB; + } + } + + // Look up the parent's post-state from cache; fall back to anchor state + const parent_root = beacon_block.parentRoot().*; + const input_state = if (self.state_cache.get(parent_root)) |cs| + cs + else if (std.mem.eql(u8, &parent_root, &self.anchor_block_root)) + self.anchor_state.cached_state + else + self.anchor_state.cached_state; + + // Run state_transition to get post-state + const post_state_result = state_transition.stateTransition( + self.allocator, + std.testing.io, + input_state, + signed_block, + .{ + .verify_signatures = false, + .verify_proposer = false, + .verify_state_root = false, + }, + ); + + if (block_step.valid) { + // Block expected to be valid + const post_state = try post_state_result; + errdefer { + post_state.deinit(); + self.allocator.destroy(post_state); + } + + // Compute block_root + var block_root: Root = undefined; + try beacon_block.hashTreeRoot(self.allocator, &block_root); + + // Determine block_delay: if tick happened in the same slot, compute seconds into slot + const seconds_per_slot = self.anchor_state.config.chain.SECONDS_PER_SLOT; + const block_slot = beacon_block.slot(); + const slot_start_time = block_slot * seconds_per_slot; + const block_delay: u32 = if (self.tick_time >= slot_start_time) + @intCast(self.tick_time - slot_start_time) + else + 0; + + const current_slot: Slot = @intCast(self.tick_time / seconds_per_slot); + + // Determine execution status — check pending payload 
statuses first. + // In TS, the execution engine mock returns a predefined status if one was + // stored by a prior payload_status step; otherwise defaults to VALID. + const execution_status = self.getBlockExecutionStatus(&beacon_block); + const da_status = getDataAvailabilityStatus(beacon_block); + + // Validate blob/proof counts for deneb/electra blocks (matching TS behavior). + // TS verifies blobs.length === commitments.length && proofs.length === commitments.length. + // Fulu uses columns (not blobs/proofs), Gloas has no DA in beacon block. + if (comptime fork.gte(.deneb) and !fork.gte(.fulu)) { + try self.validateBlobsProofs(&beacon_block, resolved_step); + } + + // Call fork choice onBlock + _ = try self.fc.onBlock( + self.allocator, + std.testing.io, + &beacon_block, + post_state, + block_delay, + current_slot, + execution_status, + da_status, + ); + + // Import attestations from block body into fork choice. + // The pyspec processes block body attestations via on_attestation(is_from_block=True), + // and Lodestar TS does this via importAttestations: Force. Without this, + // fork choice has no validator votes and proposer boost alone determines the head. + try self.importBlockAttestations(&beacon_block, post_state); + + // Store post_state in cache keyed by block_root. + // Prune old entries to avoid pool exhaustion — keep only the + // last few states (most tests are linear chains; forked tests + // have short branches). + try self.state_cache.put(block_root, post_state); + self.pruneStateCache(block_root); + } else { + // Block expected to be invalid — either state_transition, blob validation, + // or onBlock should error. + + // Check blob/proof validation first (may catch the expected failure). 
+ if (comptime fork.gte(.deneb) and !fork.gte(.fulu)) { + self.validateBlobsProofs(&beacon_block, resolved_step) catch { + if (post_state_result) |ps| { + ps.deinit(); + self.allocator.destroy(ps); + } else |_| {} + return; // Expected failure in blob validation + }; + } + + if (post_state_result) |post_state| { + defer { + post_state.deinit(); + self.allocator.destroy(post_state); + } + // state_transition succeeded, try onBlock (it may still fail) + var block_root: Root = undefined; + try beacon_block.hashTreeRoot(self.allocator, &block_root); + + const seconds_per_slot = self.anchor_state.config.chain.SECONDS_PER_SLOT; + const current_slot: Slot = @intCast(self.tick_time / seconds_per_slot); + const execution_status = self.getBlockExecutionStatus(&beacon_block); + + _ = self.fc.onBlock( + self.allocator, + std.testing.io, + &beacon_block, + post_state, + 0, + current_slot, + execution_status, + .not_required, + ) catch { + return; // Expected failure in onBlock + }; + // Block was expected to be invalid (valid:false) but all checks passed. + // Mirrors TS: `if (!isValid) throw Error("Expect error since this is a negative test")` + // Only assert for deneb/electra where blob validation is fully implemented. + // TODO: Enable for bellatrix (merge validation) and fulu+ (PeerDAS columns) once implemented. 
+ if (comptime fork.gte(.deneb) and !fork.gte(.fulu)) { + return error.ExpectedInvalidBlock; + } + } else |_| { + return; // Expected failure in state_transition + } + } + } + + fn handleAttestation(self: *Self, att_name: []const u8) !void { + const file_name = try std.fmt.allocPrint(self.allocator, "{s}.ssz_snappy", .{att_name}); + defer self.allocator.free(file_name); + + // Find the best state for epoch cache (most recently added, or anchor) + const head_state = self.getHeadState(); + + if (comptime fork.gte(.electra)) { + // Electra+ attestation format (wider committee_bits) + const types_mod = @import("consensus_types"); + var attestation: types_mod.electra.Attestation.Type = types_mod.electra.Attestation.default_value; + try loadSszSnappyValue(types_mod.electra.Attestation, self.allocator, self.test_dir, file_name, &attestation); + defer types_mod.electra.Attestation.deinit(self.allocator, &attestation); + + var indexed_att: types_mod.electra.IndexedAttestation.Type = types_mod.electra.IndexedAttestation.default_value; + try head_state.epoch_cache.computeIndexedAttestationElectra(&attestation, &indexed_att); + defer types_mod.electra.IndexedAttestation.deinit(self.allocator, &indexed_att); + + var att_data_root: Root = undefined; + try types_mod.phase0.AttestationData.hashTreeRoot(&attestation.data, &att_data_root); + + const any_indexed = AnyIndexedAttestation{ .electra = &indexed_att }; + try self.fc.onAttestation(self.allocator, &any_indexed, att_data_root, false); + } else { + // Phase0 attestation format (pre-electra) + const types_mod = @import("consensus_types"); + var attestation: types_mod.phase0.Attestation.Type = types_mod.phase0.Attestation.default_value; + try loadSszSnappyValue(types_mod.phase0.Attestation, self.allocator, self.test_dir, file_name, &attestation); + defer types_mod.phase0.Attestation.deinit(self.allocator, &attestation); + + var indexed_att: types_mod.phase0.IndexedAttestation.Type = 
types_mod.phase0.IndexedAttestation.default_value; + try head_state.epoch_cache.computeIndexedAttestationPhase0(&attestation, &indexed_att); + defer types_mod.phase0.IndexedAttestation.deinit(self.allocator, &indexed_att); + + var att_data_root: Root = undefined; + try types_mod.phase0.AttestationData.hashTreeRoot(&attestation.data, &att_data_root); + + const any_indexed = AnyIndexedAttestation{ .phase0 = &indexed_att }; + try self.fc.onAttestation(self.allocator, &any_indexed, att_data_root, false); + } + } + + fn handleAttesterSlashing(self: *Self, slashing_name: []const u8) !void { + const file_name = try std.fmt.allocPrint(self.allocator, "{s}.ssz_snappy", .{slashing_name}); + defer self.allocator.free(file_name); + + if (comptime fork.gte(.electra)) { + const types_mod = @import("consensus_types"); + var slashing: types_mod.electra.AttesterSlashing.Type = types_mod.electra.AttesterSlashing.default_value; + try loadSszSnappyValue(types_mod.electra.AttesterSlashing, self.allocator, self.test_dir, file_name, &slashing); + defer types_mod.electra.AttesterSlashing.deinit(self.allocator, &slashing); + + const any_slashing = AnyAttesterSlashing{ .electra = &slashing }; + try self.fc.onAttesterSlashing(self.allocator, &any_slashing); + } else { + const types_mod = @import("consensus_types"); + var slashing: types_mod.phase0.AttesterSlashing.Type = types_mod.phase0.AttesterSlashing.default_value; + try loadSszSnappyValue(types_mod.phase0.AttesterSlashing, self.allocator, self.test_dir, file_name, &slashing); + defer types_mod.phase0.AttesterSlashing.deinit(self.allocator, &slashing); + + const any_slashing = AnyAttesterSlashing{ .phase0 = &slashing }; + try self.fc.onAttesterSlashing(self.allocator, &any_slashing); + } + } + + /// Import attestations from a block body into fork choice. + /// Mirrors pyspec on_attestation(store, att, is_from_block=True) and + /// Lodestar TS importAttestations: Force. 
+ fn importBlockAttestations(self: *Self, beacon_block: *const AnyBeaconBlock, post_state: *CachedBeaconState) !void { + const body = beacon_block.beaconBlockBody(); + const any_atts = body.attestations(); + const atts_items = any_atts.items(); + + switch (atts_items) { + .electra => |electra_atts| { + const types_mod = @import("consensus_types"); + for (electra_atts) |*att| { + var indexed_att: types_mod.electra.IndexedAttestation.Type = types_mod.electra.IndexedAttestation.default_value; + post_state.epoch_cache.computeIndexedAttestationElectra(att, &indexed_att) catch continue; + defer types_mod.electra.IndexedAttestation.deinit(self.allocator, &indexed_att); + + var att_data_root: Root = undefined; + types_mod.phase0.AttestationData.hashTreeRoot(&att.data, &att_data_root) catch continue; + + const any_indexed = AnyIndexedAttestation{ .electra = &indexed_att }; + self.fc.onAttestation(self.allocator, &any_indexed, att_data_root, true) catch continue; + } + }, + .phase0 => |phase0_atts| { + const types_mod = @import("consensus_types"); + for (phase0_atts) |*att| { + var indexed_att: types_mod.phase0.IndexedAttestation.Type = types_mod.phase0.IndexedAttestation.default_value; + post_state.epoch_cache.computeIndexedAttestationPhase0(att, &indexed_att) catch continue; + defer types_mod.phase0.IndexedAttestation.deinit(self.allocator, &indexed_att); + + var att_data_root: Root = undefined; + types_mod.phase0.AttestationData.hashTreeRoot(&att.data, &att_data_root) catch continue; + + const any_indexed = AnyIndexedAttestation{ .phase0 = &indexed_att }; + self.fc.onAttestation(self.allocator, &any_indexed, att_data_root, true) catch continue; + } + }, + } + } + + /// Get the most recent cached state (for epoch cache access). + /// Falls back to anchor state if no blocks have been processed yet. 
+ fn getHeadState(self: *Self) *CachedBeaconState { + // Use the head block's state if available in cache + const head_root = self.fc.getHeadRoot(); + if (self.state_cache.get(head_root)) |state| { + return state; + } + return self.anchor_state.cached_state; + } + + fn handlePayloadStatus(self: *Self, ps: PayloadStatusStep) void { + // Store the status for future block processing (mirrors TS execution engine mock). + // When a subsequent block references this execution payload hash, we'll use + // the stored status instead of the default (.valid). + self.pending_payload_statuses.put(ps.block_hash, ps) catch {}; + + // For valid/invalid, also call validateLatestHash to update + // already-processed blocks (handles post-block payload_status steps). + const seconds_per_slot = self.anchor_state.config.chain.SECONDS_PER_SLOT; + const current_slot: Slot = @intCast(self.tick_time / seconds_per_slot); + + const response: LVHExecResponse = switch (ps.status) { + .valid => .{ .valid = .{ .latest_valid_exec_hash = ps.block_hash } }, + .invalid => .{ .invalid = .{ + .latest_valid_exec_hash = ps.latest_valid_hash, + .invalidate_from_parent_block_root = ZERO_HASH, + } }, + .syncing, .accepted => return, // No retroactive update needed + }; + self.fc.validateLatestHash(self.allocator, response, current_slot); + } + + fn handleChecks(self: *Self, checks: Checks) !void { + // time check: fork_choice stores time as slots, test provides time in seconds + if (checks.time) |expected_time| { + const seconds_per_slot = self.anchor_state.config.chain.SECONDS_PER_SLOT; + const expected_slot: Slot = @intCast(expected_time / seconds_per_slot); + const actual_slot = self.fc.getTime(); + try std.testing.expectEqual(expected_slot, actual_slot); + } + + // head check + if (checks.head) |expected_head| { + const result = try self.fc.updateAndGetHead(self.allocator, .{ .get_canonical_head = {} }); + try std.testing.expectEqual(expected_head.slot, result.head.slot); + try 
std.testing.expectEqualSlices(u8, &expected_head.root, &result.head.block_root); + } + + // justified checkpoint check + if (checks.justified_checkpoint) |expected| { + const actual = self.fc.getJustifiedCheckpoint(); + try std.testing.expectEqual(expected.epoch, actual.epoch); + try std.testing.expectEqualSlices(u8, &expected.root, &actual.root); + } + + // finalized checkpoint check + if (checks.finalized_checkpoint) |expected| { + const actual = self.fc.getFinalizedCheckpoint(); + try std.testing.expectEqual(expected.epoch, actual.epoch); + try std.testing.expectEqualSlices(u8, &expected.root, &actual.root); + } + + // proposer boost root check + if (checks.proposer_boost_root) |expected| { + const actual = self.fc.getProposerBoostRoot(); + try std.testing.expectEqualSlices(u8, &expected, &actual); + } + + // get_proposer_head check + if (checks.get_proposer_head) |expected_root| { + const seconds_per_slot = self.anchor_state.config.chain.SECONDS_PER_SLOT; + const current_slot: Slot = @intCast(self.tick_time / seconds_per_slot); + const sec_from_slot: u32 = @intCast(self.tick_time % seconds_per_slot); + + const result = try self.fc.updateAndGetHead(self.allocator, .{ + .get_proposer_head = .{ + .sec_from_slot = sec_from_slot, + .slot = current_slot, + }, + }); + try std.testing.expectEqualSlices(u8, &expected_root, &result.head.block_root); + } + + // should_override_forkchoice_update check + if (checks.should_override_forkchoice_update) |expected_fcu| { + const seconds_per_slot = self.anchor_state.config.chain.SECONDS_PER_SLOT; + const current_slot: Slot = @intCast(self.tick_time / seconds_per_slot); + const sec_from_slot: u32 = @intCast(self.tick_time % seconds_per_slot); + + const head_result = try self.fc.updateAndGetHead(self.allocator, .{ .get_canonical_head = {} }); + const result = self.fc.shouldOverrideForkChoiceUpdate( + &head_result.head, + sec_from_slot, + current_slot, + ); + const actual_should_override = switch (result) { + .should_override => 
true, + .should_not_override => false, + }; + try std.testing.expectEqual(expected_fcu.result, actual_should_override); + } + + // genesis_time is informational, we don't track it in fork choice + } + + /// Limit the state cache to at most max_cached_states entries to prevent + /// pool node exhaustion. Keep the most recently inserted entry (just_added) + /// and evict others when the cache exceeds the limit. + const max_cached_states = 100; + + fn pruneStateCache(self: *Self, just_added: Root) void { + while (self.state_cache.count() > max_cached_states) { + // Find a key to evict that is NOT the just_added entry + var to_remove: ?Root = null; + var iter = self.state_cache.iterator(); + while (iter.next()) |entry| { + if (!std.mem.eql(u8, &entry.key_ptr.*, &just_added)) { + to_remove = entry.key_ptr.*; + break; + } + } + if (to_remove) |key| { + if (self.state_cache.fetchRemove(key)) |kv| { + kv.value.deinit(); + self.allocator.destroy(kv.value); + } + } else break; + } + } + + fn getExecutionStatus(beacon_block: AnyBeaconBlock) ExecutionStatus { + // For pre-merge forks, execution is pre_merge + return switch (beacon_block.forkSeq()) { + .phase0, .altair => .pre_merge, + else => .valid, + }; + } + + /// Get execution status for a block, checking pending payload statuses first. + /// Mirrors the TS execution engine mock: if a payload_status step pre-loaded a + /// status for this block's execution payload hash, use it; otherwise default to + /// the fork-based default (.valid for post-merge). + fn getBlockExecutionStatus(self: *Self, beacon_block: *const AnyBeaconBlock) ExecutionStatus { + const fork_seq = beacon_block.forkSeq(); + if (fork_seq == .phase0 or fork_seq == .altair) return .pre_merge; + + // Try to get execution payload block hash and look up pending status. 
+ const body = beacon_block.beaconBlockBody(); + if (body.executionPayload()) |exec_payload| { + const exec_block_hash = exec_payload.blockHash().*; + if (self.pending_payload_statuses.get(exec_block_hash)) |ps| { + return switch (ps.status) { + .valid => .valid, + .invalid => .invalid, + .syncing, .accepted => .syncing, + }; + } + } else |_| {} + + return .valid; + } + + /// Validate blob/proof counts match blobKzgCommitments (deneb+ only). + /// Mirrors TS: `if (blobs.length !== commitments.length || proofs.length !== commitments.length)` + /// We check both blobs_count and proofs_count against commitments. + fn validateBlobsProofs(_: *Self, beacon_block: *const AnyBeaconBlock, block_step: BlockStep) !void { + const body = beacon_block.beaconBlockBody(); + const commitments = try body.blobKzgCommitments(); + const commitments_len = commitments.items.len; + + // If this block step has no blobs/proofs data, default to count 0 + // (matching TS: `if (blobs === undefined) blobs = []`) + const has_blobs = block_step.blobs != null; + const blobs_count = if (has_blobs) block_step.blobs_count else 0; + const proofs_count = block_step.proofs_count; + + if (commitments_len == 0) { + // No commitments — no blobs/proofs required + return; + } + + if (!has_blobs and proofs_count == 0) { + // TS defaults missing blobs/proofs to empty arrays. + // Empty vs non-zero commitments would fail this check. 
+ return error.InvalidBlobsOrProofsLength; + } + + // TS: `if (blobs.length !== commitments.length || proofs.length !== commitments.length)` + if (blobs_count != commitments_len) { + return error.InvalidBlobsOrProofsLength; + } + + if (proofs_count != commitments_len) { + return error.InvalidBlobsOrProofsLength; + } + } + + fn getDataAvailabilityStatus(beacon_block: AnyBeaconBlock) DataAvailabilityStatus { + return switch (beacon_block.forkSeq()) { + // Pre-data-availability forks + .phase0, .altair, .bellatrix, .capella => .pre_data, + // Gloas: beacon blocks have no DA requirement (execution payload separate) + .gloas => .not_required, + // Deneb+: DA is required and assumed available in spec tests + .deneb, .electra, .fulu => .available, + }; + } + }; +} + +// ── Justified Balances Getter for Spec Tests ── + +fn specTestBalancesGetter(_: ?*anyopaque, _: Checkpoint, state: *CachedBeaconState) JustifiedBalances { + // In spec tests, we always use the post-state's balances + const allocator = std.testing.allocator; + return state_transition.getEffectiveBalanceIncrementsZeroInactive(allocator, state) catch + return JustifiedBalances.empty; +} + +// ── YAML Parser ── + +fn parseSteps(allocator: Allocator, dir: std.Io.Dir) ![]Step { + const io = std.testing.io; + const content = try dir.readFileAlloc(io, "steps.yaml", allocator, .unlimited); + defer allocator.free(content); + + var steps: std.ArrayList(Step) = .empty; + errdefer { + for (steps.items) |*step| { + freeStep(allocator, step); + } + steps.deinit(allocator); + } + + var lines = std.mem.splitScalar(u8, content, '\n'); + var current_step_lines: std.ArrayList([]const u8) = .empty; + defer current_step_lines.deinit(allocator); + + while (lines.next()) |line| { + if (line.len >= 2 and line[0] == '-' and line[1] == ' ') { + // New step starts + if (current_step_lines.items.len > 0) { + const step = try parseStep(allocator, current_step_lines.items); + try steps.append(allocator, step); + 
current_step_lines.clearRetainingCapacity(); + } + try current_step_lines.append(allocator, line[2..]); // strip "- " + } else if (line.len > 0 and (line[0] == ' ' or line[0] == '\t')) { + // Continuation of current step + try current_step_lines.append(allocator, line); + } + // Empty lines are ignored + } + + // Parse last step + if (current_step_lines.items.len > 0) { + const step = try parseStep(allocator, current_step_lines.items); + try steps.append(allocator, step); + } + + return steps.toOwnedSlice(allocator); +} + +fn parseStep(allocator: Allocator, lines: []const []const u8) !Step { + if (lines.len == 0) return error.InvalidYaml; + + const first_line = std.mem.trim(u8, lines[0], " \t\r"); + + // Flow mapping: {key: value, ...} + // May span multiple lines (e.g., `{block: block_0x...,\n valid: true}`) + if (first_line.len > 0 and first_line[0] == '{') { + // Join all lines to handle multi-line flow mappings + if (lines.len > 1) { + var joined: std.ArrayList(u8) = .empty; + defer joined.deinit(allocator); + for (lines) |line| { + try joined.appendSlice(allocator, std.mem.trim(u8, line, " \t\r")); + try joined.append(allocator, ' '); + } + return parseFlowStep(allocator, joined.items); + } + return parseFlowStep(allocator, first_line); + } + + // Block mapping: key:\n subkey: value + if (std.mem.startsWith(u8, first_line, "checks:")) { + return .{ .checks = try parseChecks(lines[1..]) }; + } + + // Multi-line block step (e.g., block with blobs/proofs) + if (std.mem.startsWith(u8, first_line, "block:") or std.mem.startsWith(u8, first_line, "block: ")) { + return .{ .block = try parseBlockStep(allocator, lines) }; + } + + return error.InvalidYaml; +} + +fn parseFlowStep(allocator: Allocator, flow: []const u8) !Step { + // Strip { and } + const inner = std.mem.trim(u8, flow, "{ \t\r}"); + + // tick + if (extractFlowValue(inner, "tick")) |val| { + return .{ .tick = try std.fmt.parseInt(u64, val, 10) }; + } + + // attestation + if (extractFlowValue(inner, 
"attestation")) |val| { + const name = try allocator.dupe(u8, std.mem.trim(u8, val, " \r\t")); + return .{ .attestation = name }; + } + + // attester_slashing + if (extractFlowValue(inner, "attester_slashing")) |val| { + const name = try allocator.dupe(u8, std.mem.trim(u8, val, " \r\t")); + return .{ .attester_slashing = name }; + } + + // pow_block + if (extractFlowValue(inner, "pow_block")) |val| { + const name = try allocator.dupe(u8, std.mem.trim(u8, val, " \r\t")); + return .{ .pow_block = name }; + } + + // block (flow form): block: block_0x..., valid: true + if (extractFlowValue(inner, "block")) |val| { + // Might contain ", valid: true/false" + const comma_pos = std.mem.indexOf(u8, val, ","); + const block_name = if (comma_pos) |pos| + std.mem.trim(u8, val[0..pos], " \r\t") + else + std.mem.trim(u8, val, " \r\t"); + + var valid = true; + if (std.mem.indexOf(u8, inner, "valid:")) |vpos| { + const valid_str = std.mem.trim(u8, inner[vpos + "valid:".len ..], " \r\t,}"); + if (std.mem.eql(u8, valid_str, "false")) valid = false; + } + + return .{ .block = .{ + .name = try allocator.dupe(u8, block_name), + .valid = valid, + } }; + } + + return error.InvalidYaml; +} + +fn parseBlockStep(allocator: Allocator, lines: []const []const u8) !BlockStep { + if (lines.len == 0) return error.InvalidYaml; + + var name: []const u8 = ""; + var valid: bool = true; + var blobs: ?[]const u8 = null; + var proofs_count: usize = 0; + + var i: usize = 0; + while (i < lines.len) : (i += 1) { + const trimmed = std.mem.trim(u8, lines[i], " \t\r"); + + if (std.mem.startsWith(u8, trimmed, "block:") or std.mem.startsWith(u8, trimmed, "block: ")) { + if (extractValue(trimmed, "block:")) |val| { + const val_trimmed = std.mem.trim(u8, val, " \r\t"); + if (val_trimmed.len > 0) { + name = val_trimmed; + } + } + // If block: has empty value, next line is the block name + if (name.len == 0 and i + 1 < lines.len) { + const next = std.mem.trim(u8, lines[i + 1], " \t\r"); + if (std.mem.startsWith(u8, 
next, "block_")) { + name = next; + i += 1; + } + } + } else if (std.mem.startsWith(u8, trimmed, "valid:")) { + if (extractValue(trimmed, "valid:")) |val| { + const val_trimmed = std.mem.trim(u8, val, " \r\t"); + if (std.mem.eql(u8, val_trimmed, "false")) valid = false; + } + } else if (std.mem.startsWith(u8, trimmed, "blobs:")) { + if (extractValue(trimmed, "blobs:")) |val| { + const val_trimmed = std.mem.trim(u8, val, " \r\t"); + if (val_trimmed.len > 0) { + blobs = val_trimmed; + } + } + // If blobs: has empty value, next line is the blobs file name + if (blobs == null and i + 1 < lines.len) { + const next = std.mem.trim(u8, lines[i + 1], " \t\r"); + if (std.mem.startsWith(u8, next, "blobs_")) { + blobs = next; + i += 1; + } + } + } else if (std.mem.startsWith(u8, trimmed, "proofs:")) { + // proofs is a YAML list of hex strings. + // Flow-style: ['0x...', '0x...'] or block-style: - '0x...' + // Count the number of entries by counting '0x' occurrences. + if (extractValue(trimmed, "proofs:")) |val| { + var count: usize = 0; + var pos: usize = 0; + while (std.mem.indexOf(u8, val[pos..], "0x")) |idx| { + count += 1; + pos += idx + 2; + } + // If proofs span multiple lines, scan continuation lines. 
+ if (std.mem.indexOfScalar(u8, val, ']') == null) { + var j = i + 1; + while (j < lines.len) : (j += 1) { + const next_trimmed = std.mem.trim(u8, lines[j], " \t\r"); + // Stop before YAML key lines that aren't part of the proofs list + if (std.mem.startsWith(u8, next_trimmed, "valid:") or + std.mem.startsWith(u8, next_trimmed, "blobs:") or + std.mem.startsWith(u8, next_trimmed, "columns:") or + std.mem.startsWith(u8, next_trimmed, "block:")) + { + // Backtrack so the outer loop processes this key line + j -= 1; + break; + } + var npos: usize = 0; + while (std.mem.indexOf(u8, next_trimmed[npos..], "0x")) |idx| { + count += 1; + npos += idx + 2; + } + if (std.mem.indexOfScalar(u8, next_trimmed, ']') != null) break; + } + i = j; + } + proofs_count = count; + } + } + // columns — ignored (PeerDAS data column handling not implemented) + } + + if (name.len == 0) return error.InvalidYaml; + + return .{ + .name = try allocator.dupe(u8, name), + .valid = valid, + .blobs = if (blobs) |b| try allocator.dupe(u8, b) else null, + .proofs_count = proofs_count, + }; +} + +fn parseChecks(lines: []const []const u8) !Checks { + var checks = Checks{}; + var i: usize = 0; + while (i < lines.len) : (i += 1) { + const trimmed = std.mem.trim(u8, lines[i], " \t\r"); + if (trimmed.len == 0) continue; + + if (std.mem.startsWith(u8, trimmed, "genesis_time:")) { + if (extractValue(trimmed, "genesis_time:")) |val| { + checks.genesis_time = std.fmt.parseInt(u64, std.mem.trim(u8, val, " "), 10) catch null; + } + } else if (std.mem.startsWith(u8, trimmed, "time:")) { + if (extractValue(trimmed, "time:")) |val| { + checks.time = std.fmt.parseInt(u64, std.mem.trim(u8, val, " "), 10) catch null; + } + } else if (std.mem.startsWith(u8, trimmed, "head:")) { + // Next two lines: slot and root + checks.head = try parseSubFieldHead(lines[i + 1 ..]); + i += 2; + } else if (std.mem.startsWith(u8, trimmed, "justified_checkpoint:")) { + checks.justified_checkpoint = try parseSubFieldCheckpoint(lines[i + 1 
..]); + i += 2; + } else if (std.mem.startsWith(u8, trimmed, "finalized_checkpoint:")) { + checks.finalized_checkpoint = try parseSubFieldCheckpoint(lines[i + 1 ..]); + i += 2; + } else if (std.mem.startsWith(u8, trimmed, "proposer_boost_root:")) { + if (extractValue(trimmed, "proposer_boost_root:")) |val| { + checks.proposer_boost_root = try parseHexRoot(val); + } + } else if (std.mem.startsWith(u8, trimmed, "get_proposer_head:")) { + if (extractValue(trimmed, "get_proposer_head:")) |val| { + checks.get_proposer_head = try parseHexRoot(val); + } + } else if (std.mem.startsWith(u8, trimmed, "should_override_forkchoice_update:")) { + if (extractValue(trimmed, "should_override_forkchoice_update:")) |val| { + // The value may span multiple lines, e.g.: + // should_override_forkchoice_update: {validator_is_connected: true, result: + // true} + // Concatenate lines until we find the closing '}'. + if (std.mem.indexOfScalar(u8, val, '}') != null) { + checks.should_override_forkchoice_update = try parseShouldOverrideFCU(val); + } else { + var buf: [512]u8 = undefined; + var pos: usize = 0; + const src = val; + @memcpy(buf[pos..][0..src.len], src); + pos += src.len; + var j = i + 1; + while (j < lines.len) : (j += 1) { + const next_trimmed = std.mem.trim(u8, lines[j], " \t\r"); + buf[pos] = ' '; + pos += 1; + @memcpy(buf[pos..][0..next_trimmed.len], next_trimmed); + pos += next_trimmed.len; + if (std.mem.indexOfScalar(u8, next_trimmed, '}') != null) break; + } + checks.should_override_forkchoice_update = try parseShouldOverrideFCU(buf[0..pos]); + i = j; + } + } + } + } + return checks; +} + +fn parseSubFieldHead(lines: []const []const u8) !CheckHead { + var slot: Slot = 0; + var root: Root = ZERO_HASH; + + for (lines[0..@min(2, lines.len)]) |line| { + const trimmed = std.mem.trim(u8, line, " \t\r"); + if (std.mem.startsWith(u8, trimmed, "slot:")) { + if (extractValue(trimmed, "slot:")) |val| { + slot = try std.fmt.parseInt(Slot, std.mem.trim(u8, val, " "), 10); + } + } 
else if (std.mem.startsWith(u8, trimmed, "root:")) { + if (extractValue(trimmed, "root:")) |val| { + root = try parseHexRoot(val); + } + } + } + return .{ .slot = slot, .root = root }; +} + +fn parseSubFieldCheckpoint(lines: []const []const u8) !CheckCheckpoint { + var epoch: Epoch = 0; + var root: Root = ZERO_HASH; + + for (lines[0..@min(2, lines.len)]) |line| { + const trimmed = std.mem.trim(u8, line, " \t\r"); + if (std.mem.startsWith(u8, trimmed, "epoch:")) { + if (extractValue(trimmed, "epoch:")) |val| { + epoch = try std.fmt.parseInt(Epoch, std.mem.trim(u8, val, " "), 10); + } + } else if (std.mem.startsWith(u8, trimmed, "root:")) { + if (extractValue(trimmed, "root:")) |val| { + root = try parseHexRoot(val); + } + } + } + return .{ .epoch = epoch, .root = root }; +} + +fn parseShouldOverrideFCU(val: []const u8) !ShouldOverrideFCU { + // Format: {validator_is_connected: true, result: true} + const inner = std.mem.trim(u8, val, " {}\t\r'"); + var result_val: bool = false; + var connected: bool = true; + + if (std.mem.indexOf(u8, inner, "result:")) |pos| { + const after = std.mem.trim(u8, inner[pos + "result:".len ..], " ,}"); + const end = std.mem.indexOf(u8, after, ",") orelse after.len; + result_val = std.mem.eql(u8, std.mem.trim(u8, after[0..end], " "), "true"); + } + if (std.mem.indexOf(u8, inner, "validator_is_connected:")) |pos| { + const after = std.mem.trim(u8, inner[pos + "validator_is_connected:".len ..], " ,}"); + const end = std.mem.indexOf(u8, after, ",") orelse after.len; + connected = std.mem.eql(u8, std.mem.trim(u8, after[0..end], " "), "true"); + } + + return .{ .result = result_val, .validator_is_connected = connected }; +} + +fn parseHexRoot(val: []const u8) !Root { + // Value is like: '0xabcdef...' or "0xabcdef..." + const trimmed = std.mem.trim(u8, val, " '\"\t\r"); + if (trimmed.len < 2) return ZERO_HASH; + const hex = if (std.mem.startsWith(u8, trimmed, "0x")) trimmed[2..] 
else trimmed; + if (hex.len != 64) return error.InvalidHexRoot; + var root: Root = undefined; + _ = try std.fmt.hexToBytes(&root, hex); + return root; +} + +fn extractFlowValue(text: []const u8, comptime key: []const u8) ?[]const u8 { + // Look for "key: value" or "key:value" in flow text + const key_colon = key ++ ":"; + const pos = std.mem.indexOf(u8, text, key_colon) orelse return null; + const after = text[pos + key_colon.len ..]; + return std.mem.trim(u8, after, " "); +} + +fn extractValue(text: []const u8, key: []const u8) ?[]const u8 { + if (std.mem.startsWith(u8, text, key)) { + return std.mem.trim(u8, text[key.len..], " "); + } + return null; +} + +fn freeStep(allocator: Allocator, step: *Step) void { + switch (step.*) { + .block => |b| { + allocator.free(b.name); + if (b.blobs) |bl| allocator.free(bl); + }, + .attestation => |a| allocator.free(a), + .attester_slashing => |s| allocator.free(s), + .pow_block => |p| allocator.free(p), + else => {}, + } +} + +fn freeSteps(allocator: Allocator, steps: []Step) void { + for (steps) |*step| { + freeStep(allocator, step); + } + allocator.free(steps); +} diff --git a/test/spec/runner_kind.zig b/test/spec/runner_kind.zig index 869ffe97f..a8c973027 100644 --- a/test/spec/runner_kind.zig +++ b/test/spec/runner_kind.zig @@ -1,6 +1,7 @@ pub const RunnerKind = enum { epoch_processing, fork, + fork_choice, finality, merkle_proof, operations, diff --git a/test/spec/test_case.zig b/test/spec/test_case.zig index 00c466199..0a051423c 100644 --- a/test/spec/test_case.zig +++ b/test/spec/test_case.zig @@ -6,6 +6,7 @@ const isFixedType = @import("ssz").isFixedType; const state_transition = @import("state_transition"); const Node = @import("persistent_merkle_tree").Node; const AnySignedBeaconBlock = @import("fork_types").AnySignedBeaconBlock; +const AnyBeaconBlock = @import("fork_types").AnyBeaconBlock; const AnyBeaconState = @import("fork_types").AnyBeaconState; const TestCachedBeaconState = 
state_transition.test_utils.TestCachedBeaconState; @@ -89,6 +90,26 @@ pub fn TestCaseUtils(comptime fork: ForkSeq) type { return try TestCachedBeaconState.initFromState(allocator, pool, pre_state_all_forks, fork, fork_epoch); } + pub fn loadPreStateFromFile(allocator: Allocator, pool: *Node.Pool, dir: std.Io.Dir, file_name: []const u8) !TestCachedBeaconState { + var pre_state = ForkTypes.BeaconState.default_value; + try loadSszSnappyValue(ForkTypes.BeaconState, allocator, dir, file_name, &pre_state); + defer ForkTypes.BeaconState.deinit(allocator, &pre_state); + + const pre_state_all_forks = try allocator.create(AnyBeaconState); + errdefer allocator.destroy(pre_state_all_forks); + + pre_state_all_forks.* = @unionInit( + AnyBeaconState, + fork.name(), + try ForkTypes.BeaconState.TreeView.fromValue(allocator, pool, &pre_state), + ); + errdefer pre_state_all_forks.deinit(); + + var f = try pre_state_all_forks.fork(); + const fork_epoch = try f.get("epoch"); + return try TestCachedBeaconState.initFromState(allocator, pool, pre_state_all_forks, fork, fork_epoch); + } + /// consumer should deinit the returned state and destroy the pointer pub fn loadPostState(allocator: Allocator, pool: *Node.Pool, dir: std.Io.Dir) !?*AnyBeaconState { var post_state = ForkTypes.BeaconState.default_value; @@ -273,6 +294,143 @@ pub fn loadSszSnappyValue(comptime ST: type, allocator: std.mem.Allocator, dir: } } +/// load BeaconBlock (unsigned) from file using runtime fork +/// consumer should deinit the returned block and destroy the pointer +pub fn loadBeaconBlock(allocator: std.mem.Allocator, fork: ForkSeq, dir: std.Io.Dir, file_name: []const u8) !AnyBeaconBlock { + return switch (fork) { + .phase0 => blk: { + const out = try allocator.create(phase0.BeaconBlock.Type); + out.* = phase0.BeaconBlock.default_value; + try loadSszSnappyValue(types.phase0.BeaconBlock, allocator, dir, file_name, out); + break :blk AnyBeaconBlock{ + .phase0 = out, + }; + }, + .altair => blk: { + const out = try 
allocator.create(altair.BeaconBlock.Type); + out.* = altair.BeaconBlock.default_value; + try loadSszSnappyValue(types.altair.BeaconBlock, allocator, dir, file_name, out); + break :blk AnyBeaconBlock{ + .altair = out, + }; + }, + .bellatrix => blk: { + const out = try allocator.create(bellatrix.BeaconBlock.Type); + out.* = bellatrix.BeaconBlock.default_value; + try loadSszSnappyValue(types.bellatrix.BeaconBlock, allocator, dir, file_name, out); + break :blk AnyBeaconBlock{ + .full_bellatrix = out, + }; + }, + .capella => blk: { + const out = try allocator.create(capella.BeaconBlock.Type); + out.* = capella.BeaconBlock.default_value; + try loadSszSnappyValue(types.capella.BeaconBlock, allocator, dir, file_name, out); + break :blk AnyBeaconBlock{ + .full_capella = out, + }; + }, + .deneb => blk: { + const out = try allocator.create(deneb.BeaconBlock.Type); + out.* = deneb.BeaconBlock.default_value; + try loadSszSnappyValue(types.deneb.BeaconBlock, allocator, dir, file_name, out); + break :blk AnyBeaconBlock{ + .full_deneb = out, + }; + }, + .electra => blk: { + const out = try allocator.create(electra.BeaconBlock.Type); + out.* = electra.BeaconBlock.default_value; + try loadSszSnappyValue(types.electra.BeaconBlock, allocator, dir, file_name, out); + break :blk AnyBeaconBlock{ + .full_electra = out, + }; + }, + .fulu => blk: { + const out = try allocator.create(fulu.BeaconBlock.Type); + out.* = fulu.BeaconBlock.default_value; + try loadSszSnappyValue(types.fulu.BeaconBlock, allocator, dir, file_name, out); + break :blk AnyBeaconBlock{ + .full_fulu = out, + }; + }, + .gloas => blk: { + const out = try allocator.create(gloas.BeaconBlock.Type); + out.* = gloas.BeaconBlock.default_value; + try loadSszSnappyValue(types.gloas.BeaconBlock, allocator, dir, file_name, out); + break :blk AnyBeaconBlock{ + .full_gloas = out, + }; + }, + }; +} + +pub fn deinitBeaconBlock(block: AnyBeaconBlock, allocator: std.mem.Allocator) void { + switch (block) { + .phase0 => |b| { + 
phase0.BeaconBlock.deinit(allocator, @constCast(b)); + allocator.destroy(b); + }, + .altair => |b| { + altair.BeaconBlock.deinit(allocator, @constCast(b)); + allocator.destroy(b); + }, + .full_bellatrix => |b| { + bellatrix.BeaconBlock.deinit(allocator, @constCast(b)); + allocator.destroy(b); + }, + .blinded_bellatrix => |b| { + bellatrix.BlindedBeaconBlock.deinit(allocator, @constCast(b)); + allocator.destroy(b); + }, + .full_capella => |b| { + capella.BeaconBlock.deinit(allocator, @constCast(b)); + allocator.destroy(b); + }, + .blinded_capella => |b| { + capella.BlindedBeaconBlock.deinit(allocator, @constCast(b)); + allocator.destroy(b); + }, + .full_deneb => |b| { + deneb.BeaconBlock.deinit(allocator, @constCast(b)); + allocator.destroy(b); + }, + .blinded_deneb => |b| { + deneb.BlindedBeaconBlock.deinit(allocator, @constCast(b)); + allocator.destroy(b); + }, + .full_electra => |b| { + electra.BeaconBlock.deinit(allocator, @constCast(b)); + allocator.destroy(b); + }, + .blinded_electra => |b| { + electra.BlindedBeaconBlock.deinit(allocator, @constCast(b)); + allocator.destroy(b); + }, + .full_fulu => |b| { + fulu.BeaconBlock.deinit(allocator, @constCast(b)); + allocator.destroy(b); + }, + .blinded_fulu => |b| { + fulu.BlindedBeaconBlock.deinit(allocator, @constCast(b)); + allocator.destroy(b); + }, + .full_gloas => |b| { + gloas.BeaconBlock.deinit(allocator, @constCast(b)); + allocator.destroy(b); + }, + } +} + +/// Returns the decompressed SSZ size of a snappy-compressed file without full deserialization. 
+pub fn getSszSnappyDecompressedSize(allocator: std.mem.Allocator, dir: std.Io.Dir, file_name: []const u8) !usize { + const io = std.testing.io; + const value_bytes = try dir.readFileAlloc(io, file_name, allocator, .unlimited); + defer allocator.free(value_bytes); + + return snappy.uncompressedLength(value_bytes); +} + pub fn expectEqualBeaconStates(expected: *AnyBeaconState, actual: *AnyBeaconState) !void { if (expected.forkSeq() != actual.forkSeq()) return error.ForkMismatch; diff --git a/test/spec/version.txt b/test/spec/version.txt index 4519d7307..b2e6196ed 100644 --- a/test/spec/version.txt +++ b/test/spec/version.txt @@ -2,4 +2,4 @@ // This file exists as a cache key for the spec tests in CI. // Do not commit changes by hand. -v1.6.0-beta.2 +v1.7.0-alpha.5 diff --git a/test/spec/write_spec_tests.zig b/test/spec/write_spec_tests.zig index 516074c06..40fbe8579 100644 --- a/test/spec/write_spec_tests.zig +++ b/test/spec/write_spec_tests.zig @@ -20,6 +20,7 @@ const supported_test_runners = [_]RunnerKind{ .sanity, .epoch_processing, .fork, + .fork_choice, .transition, .random, .finality, @@ -33,6 +34,7 @@ fn TestWriter(comptime kind: RunnerKind) type { .sanity => @import("./writer/sanity.zig"), .epoch_processing => @import("./writer/epoch_processing.zig"), .fork => @import("./writer/fork.zig"), + .fork_choice => @import("./writer/fork_choice.zig"), .transition => @import("./writer/transition.zig"), .random => @import("./writer/random.zig"), .finality => @import("./writer/finality.zig"), diff --git a/test/spec/writer/fork_choice.zig b/test/spec/writer/fork_choice.zig new file mode 100644 index 000000000..144eb99cf --- /dev/null +++ b/test/spec/writer/fork_choice.zig @@ -0,0 +1,98 @@ +const std = @import("std"); +const ForkSeq = @import("config").ForkSeq; + +pub const Handler = enum { + deposit_with_reorg, + ex_ante, + get_head, + get_proposer_head, + on_block, + on_merge_block, + reorg, + should_override_forkchoice_update, + withholding, + + pub fn suiteName(self: 
Handler) []const u8 { + return @tagName(self) ++ "/pyspec_tests"; + } +}; + +pub const handlers = std.enums.values(Handler); + +pub const header = + \\// This file is generated by write_spec_tests.zig. + \\// Do not commit changes by hand. + \\ + \\const std = @import("std"); + \\const Node = @import("persistent_merkle_tree").Node; + \\const ForkSeq = @import("config").ForkSeq; + \\const active_preset = @import("preset").active_preset; + \\const spec_test_options = @import("spec_test_options"); + \\const ForkChoiceRunner = @import("../runner/fork_choice.zig"); + \\ + \\const allocator = std.testing.allocator; + \\const pool_size = if (active_preset == .mainnet) 10_000_000 else 1_000_000; + \\ + \\ +; + +const test_template = + \\test "{s} fork_choice {s} {s}" {{ + \\ var pool = try Node.Pool.init(allocator, pool_size); + \\ defer pool.deinit(); + \\ const test_dir_name = try std.fs.path.join(allocator, &[_][]const u8{{ + \\ spec_test_options.spec_test_out_dir, + \\ spec_test_options.spec_test_version, + \\ @tagName(active_preset) ++ "/tests/" ++ @tagName(active_preset) ++ "/{s}/fork_choice/{s}/pyspec_tests/{s}", + \\ }}); + \\ defer allocator.free(test_dir_name); + \\ const test_dir = std.Io.Dir.openDir(.cwd(), std.testing.io, test_dir_name, .{{}}) catch return error.SkipZigTest; + \\ + \\ try ForkChoiceRunner.TestCase(.{s}).execute(allocator, &pool, test_dir); + \\}} + \\ + \\ +; + +pub fn writeHeader(writer: *std.Io.Writer) !void { + try writer.print(header, .{}); +} + +/// Tests skipped to match Lodestar TS unstable. +/// TODO GLOAS: proposer boost specs changed retroactively in v1.7.0-alpha.1; +/// remove once update_proposer_boost_root is implemented. +/// invalid_incorrect_proof: no KZG verification for minimal preset. 
const skip_patterns = [_][]const u8{
    "voting_source_beyond_two_epoch",
    "justified_update_always_if_better",
    "justified_update_not_realized_finality",
    "invalid_incorrect_proof",
};

/// Returns true when `test_case_name` contains any of `skip_patterns`
/// as a substring (matching how the TS side filters cases by name).
fn shouldSkip(test_case_name: []const u8) bool {
    for (skip_patterns) |pattern| {
        if (std.mem.indexOf(u8, test_case_name, pattern) != null) return true;
    }
    return false;
}

/// Emit one generated `test` declaration for a fork_choice spec test case.
/// Skipped cases (see `skip_patterns`) produce no output at all.
///
/// `test_template` consumes 7 positional `{s}` args in order:
/// fork/handler/case for the test name, fork/handler/case again for the
/// on-disk test directory path, then fork once more for the runner type.
pub fn writeTest(
    writer: *std.Io.Writer,
    fork: ForkSeq,
    handler: Handler,
    test_case_name: []const u8,
) !void {
    if (shouldSkip(test_case_name)) return;

    // Hoist the repeated runtime tag lookups so the argument tuple below
    // reads as the template's positional groups rather than duplicated calls.
    const fork_name = @tagName(fork);
    const handler_name = @tagName(handler);

    try writer.print(test_template, .{
        fork_name,
        handler_name,
        test_case_name,

        fork_name,
        handler_name,
        test_case_name,

        fork_name,
    });
}